# Project root: two levels up from the current working directory (printed for
# the knit log). NOTE(review): `main_dir` used below is presumably assigned to
# this path in an earlier chunk — TODO confirm.
dirname(dirname(getwd()))
[1] "/Users/h/Documents/projects_local/cue_expectancy"
/Users/h/Documents/projects_local/cue_expectancy

Prepare dataframe

# Label for this analysis and the single-trial behavioral events table.
analysis_folder <- "fmri_nps_canlab"

# show_col_types = FALSE suppresses readr's column-specification banner,
# as readr's own startup message recommends once the spec has been checked.
beh <- readr::read_tsv(
  file.path(main_dir,
            "analysis/fmri/nilearn/singletrial_rampupplateau/beh/sub-all_task-all_events.tsv"),
  show_col_types = FALSE
)
Rows: 17400 Columns: 21
── Column specification ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Delimiter: "\t"
chr (20): sub, ses, run, runtype, cue, stimulusintensity, singletrial_fname, expectrating, expectlabel, outcomerating, outcomelabel, exp...
dbl  (1): trial_index

ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# Dependent-variable label and the precomputed signature-score tables.
# NOTE(review): `dv` is reassigned several times later in the script; the NPS
# table covers all three tasks (runtype-pvc) while SIIPS exists for pain runs
# only — hence the different runtype tags in the filenames.
dv <- "nps"
base_dir <- file.path(main_dir, "analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab")
all_nps <- read.csv(file.path(base_dir, "signature-NPS_sub-all_runtype-pvc_event-stimulus.csv"))
all_siips <-  read.csv(file.path(base_dir, "signature-SIIPS_sub-all_runtype-pain_event-stimulus.csv"))

############# NOTE: run once; concatenate all csv files ########################
# file_paths <- list.files(base_dir, pattern =  "sub-.*_signature-NPSroi_.*\\.csv$", full.names = TRUE, recursive = TRUE)
# all_nps <- file_paths %>%
#   map_dfr(~read_csv(.x, show_col_types = FALSE), .id = "file_path")
#
# file_paths <- list.files(base_dir, pattern =  "sub-.*_roi-SIIPS.*\\.csv$", full.names = TRUE, recursive = TRUE)
# all_siips <- file_paths %>%
#   map_dfr(~read_csv(.x, show_col_types = FALSE), .id = "file_path")
################################################################################

# Merge behavior with NPS scores on the single-trial filename key, then parse
# the BIDS-style entities (sub/ses/run/...) out of that filename.
df_merge <- inner_join(beh, all_nps, by = "singletrial_fname")
df_merge <- df_merge %>%
  mutate(singletrial_fname = as.character(singletrial_fname)) %>%
  extract(singletrial_fname,
          into = c("sub", "ses", "run", "runtype", "event", "trial",
                   "cuetype", "stimintensity"),
          # Fix: escape BOTH dots in ".nii.gz" — the previous pattern ended in
          # "\\.nii.gz$", where the unescaped "." matched any character.
          regex = "^(sub-\\d+)_(ses-\\d+)_(run-\\d+)_runtype-(pain|vicarious|cognitive)_event-(cue|stimulus)_trial-(\\d+)_cuetype-(high|low)(?:_stimintensity-(high|med|low))?\\.nii\\.gz$",
          remove = FALSE)

# Mark empty-string stimintensity as missing. NOTE(review): tidyr::extract()
# returns NA (not "") for a non-matching optional group, so this line is most
# likely a no-op kept as a safeguard for older tidyr behavior — confirm.
df_merge$stimintensity[df_merge$stimintensity == ""] <- NA

# Restrict all downstream analyses in this section to pain-task trials
data <-df_merge[df_merge$runtype == "pain" ,]

# Contrast-code experimental factors _________________________________________
# Named lookup tables replace the repeated logical subassignments; levels that
# do not match (including NA) come back NA, exactly as the previously
# NA-initialized columns did.

# Stimulus intensity, centered: low = -0.5, med = 0, high = +0.5
data$stim <- unname(
  c(low_stim = -0.5, med_stim = 0, high_stim = 0.5)[as.character(data$stimulusintensity)]
)
# Contrast 1 (linear trend) uses the same codes as `stim`
data$STIM_linear <- data$stim
# Contrast 2 (quadratic trend): medium vs. the two extremes
data$STIM_quadratic <- unname(
  c(low_stim = -0.33, med_stim = 0.66, high_stim = -0.33)[as.character(data$stimulusintensity)]
)
# Cue contrast: high cue vs. low cue
data$CUE_high_gt_low <- unname(
  c(low_cue = -0.5, high_cue = 0.5)[as.character(data$cue)]
)
# Session contrasts across ses-01 / ses-03 / ses-04 (linear and quadratic)
data$SES_linear <- unname(
  c("ses-01" = -0.5, "ses-03" = 0, "ses-04" = 0.5)[as.character(data$ses)]
)
data$SES_quadratic <- unname(
  c("ses-01" = -0.33, "ses-03" = 0.66, "ses-04" = -0.33)[as.character(data$ses)]
)

# Factor version of stimulus intensity for summaries/plots
data$STIM <- factor(data$stimulusintensity)

# Convenience aliases for the behavioral ratings
data$EXPECT <- data$expectrating
data$OUTCOME <- data$outcomerating

# Column-name handles consumed by downstream modeling/plotting helpers
stim_con1 <- "STIM_linear"
stim_con2 <- "STIM_quadratic"
iv1 <- "CUE_high_gt_low"
dv <- "nps"
dv_keyword <- "NPS"
# load dataframes ______________________________________________________________

# SIIPS <- siips %>%
#   select(-X)
# NPS <- nps %>%
#   select(-X)
# Re-load the behavioral events table (same file as the earlier chunk;
# re-read here so this section can run standalone)
beh <- readr::read_tsv(file.path(main_dir, "analysis/fmri/nilearn/singletrial_rampupplateau/beh/sub-all_task-all_events.tsv"))
Rows: 17400 Columns: 21
── Column specification ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Delimiter: "\t"
chr (20): sub, ses, run, runtype, cue, stimulusintensity, singletrial_fname, expectrating, expectlabel, outcomerating, outcomelabel, exp...
dbl  (1): trial_index

ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# base_dir <- "/Volumes/spacetop_projects_cue/analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab"
# Signature scores (duplicates the all_nps/all_siips loads above): NPS was
# extracted for all three tasks (runtype-pvc), SIIPS for pain runs only.
NPS <- read.csv(file.path(main_dir, "analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab/signature-NPS_sub-all_runtype-pvc_event-stimulus.csv"))
SIIPS <-  read.csv(file.path(main_dir, "analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab/signature-SIIPS_sub-all_runtype-pain_event-stimulus.csv"))

# Merge the three dataframes ___________________________________________________
# Join behavior with the NPS and SIIPS scores on the single-trial filename key.
df_merge1 <- inner_join(beh, NPS, by = "singletrial_fname")
dfmerge <- inner_join(df_merge1, SIIPS, by = "singletrial_fname")

# Parse BIDS-style entities out of the single-trial filename.
dfmerge <- dfmerge %>%
  mutate(singletrial_fname = as.character(singletrial_fname)) %>%
  extract(singletrial_fname,
          into = c("sub", "ses", "run", "runtype", "event", "trial",
                   "cuetype", "stimintensity"),
          # Fix: escape BOTH dots in ".nii.gz" — the previous pattern ended in
          # "\\.nii.gz$", where the unescaped "." matched any character.
          regex = "^(sub-\\d+)_(ses-\\d+)_(run-\\d+)_runtype-(pain|vicarious|cognitive)_event-(cue|stimulus)_trial-(\\d+)_cuetype-(high|low)(?:_stimintensity-(high|med|low))?\\.nii\\.gz$",
          remove = FALSE)

# Uppercase alias for the NPS score column used by the models below
dfmerge$NPS <- dfmerge$nps
# Output folder for dissertation chapter-4 figures ___________________________
analysis_dir <- "/Users/h/Documents/projects_local/cue_expectancy/resources/plots_dissertation/ch4"
dir.create(analysis_dir, recursive = TRUE, showWarnings = FALSE)
savedir <- analysis_dir

# Cue-condition colors used throughout the plots
lowcuehex <- "#4274AD"   # blue (low cue); previously "#575a5e"
highcuehex <- "#C5263A"  # red (high cue); previously "#FEAE00"

# Keep pain-task trials only
data <- dfmerge[dfmerge$runtype == "pain", ]

# Contrast-code experimental factors _________________________________________
# Named lookup tables replace the repeated logical subassignments; levels that
# do not match (including NA) come back NA, exactly as the previously
# NA-initialized columns did.

data$stim <- unname(
  c(low_stim = -0.5, med_stim = 0, high_stim = 0.5)[as.character(data$stimulusintensity)]
)
# Contrast 1 (linear trend) uses the same codes as `stim`
data$STIM_linear <- data$stim
# Contrast 2 (quadratic trend): medium vs. the two extremes
data$STIM_quadratic <- unname(
  c(low_stim = -0.33, med_stim = 0.66, high_stim = -0.33)[as.character(data$stimulusintensity)]
)
# Cue contrast: high cue vs. low cue
data$CUE_high_gt_low <- unname(
  c(low_cue = -0.5, high_cue = 0.5)[as.character(data$cue)]
)
# Session contrasts across ses-01 / ses-03 / ses-04 (linear and quadratic)
data$SES_linear <- unname(
  c("ses-01" = -0.5, "ses-03" = 0, "ses-04" = 0.5)[as.character(data$ses)]
)
data$SES_quadratic <- unname(
  c("ses-01" = -0.33, "ses-03" = 0.66, "ses-04" = -0.33)[as.character(data$ses)]
)

# Factor versions for modeling/plotting
data$STIM <- factor(data$stimulusintensity)
data$SUB <- factor(data$sub)

# Imputed ratings arrive as strings with "n/a" placeholders; convert to numeric
data$outcomerating_impute <- as.numeric(
  replace(data$outcomerating_impute, which(data$outcomerating_impute == "n/a"), NA)
)
data$expectrating_impute <- as.numeric(
  replace(data$expectrating_impute, which(data$expectrating_impute == "n/a"), NA)
)

# Convenience aliases for the raw behavioral ratings
data$EXPECT <- data$expectrating
data$OUTCOME <- data$outcomerating

# Column-name handles consumed by downstream modeling/plotting helpers
stim_con1 <- "STIM_linear"
stim_con2 <- "STIM_quadratic"
iv1 <- "CUE_high_gt_low"

# Exclude subjects with sparse design cells: any cue x stimulus-intensity cell
# holding fewer than 3 trials disqualifies the whole subject.
subjects_with_inadequate_data <- data %>%
  dplyr::count(sub, CUE_high_gt_low, STIM_linear) %>% # per-cell trial counts (SES_linear omitted)
  filter(n < 3) %>%
  distinct(sub) %>%
  pull(sub)

df_filter <- data %>%
  filter(!(sub %in% subjects_with_inadequate_data))

# Report the subject count before vs. after exclusion
print(sprintf("after filtering out subjects that have less than 3 trials in cell, we have N=%d -> N=%d", length(unique(data$sub)), length(unique(df_filter$sub))))
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"

1 Beh

Beh linear analysis

# Behavioral model: outcome rating ~ cue x stimulus (linear + quadratic
# contrasts), with by-subject random intercepts and random slopes for cue and
# the linear stimulus term. (The Satterthwaite t-tests in the printed summary
# indicate lmerTest is loaded — presumably in an earlier chunk.)
model.beh <- lmer(outcomerating_impute ~ CUE_high_gt_low*STIM_linear +
       CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low+STIM_linear|sub), data = df_filter)
summary(model.beh)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: outcomerating_impute ~ CUE_high_gt_low * STIM_linear + CUE_high_gt_low *  
    STIM_quadratic + (CUE_high_gt_low + STIM_linear | sub)
   Data: df_filter

REML criterion at convergence: 44907.5

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.8573 -0.5565 -0.0002  0.5477  4.2955 

Random effects:
 Groups   Name            Variance Std.Dev. Corr       
 sub      (Intercept)     814.63   28.542              
          CUE_high_gt_low  59.51    7.715   -0.03      
          STIM_linear     146.40   12.100    0.40  0.07
 Residual                 401.05   20.026              
Number of obs: 5014, groups:  sub, 92

Fixed effects:
                                Estimate Std. Error        df t value Pr(>|t|)    
(Intercept)                      64.0388     2.9917   90.8850  21.406  < 2e-16 ***
CUE_high_gt_low                   8.1019     1.0004   86.6825   8.099 3.19e-12 ***
STIM_linear                      29.9137     1.4594   91.8897  20.498  < 2e-16 ***
STIM_quadratic                    0.8294     0.6058 4739.6474   1.369  0.17101    
CUE_high_gt_low:STIM_linear      -0.5183     1.3873 4741.0874  -0.374  0.70874    
CUE_high_gt_low:STIM_quadratic   -3.7623     1.2116 4740.0763  -3.105  0.00191 ** 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
                     (Intr) CUE_h__ STIM_l STIM_q CUE_hgh_gt_lw:STIM_l
CUE_hgh_gt_          -0.028                                           
STIM_linear           0.348  0.055                                    
STIM_qudrtc           0.000 -0.001  -0.001                            
CUE_hgh_gt_lw:STIM_l  0.001  0.001   0.001 -0.004                     
CUE_hgh_gt_lw:STIM_q  0.000 -0.001  -0.002  0.002 -0.001              
# HTML summary table of the behavioral model (sjPlot)
sjPlot::tab_model(model.beh,
                  title = "Multilevel-modeling: \nlmer(Outcome ~ CUE * STIM + (CUE + STIM | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(Outcome ~ CUE * STIM + (CUE + STIM | sub), data = pain)
  outcomerating_impute
Predictors Estimates CI p
(Intercept) 64.04 58.17 – 69.90 <0.001
CUE high gt low 8.10 6.14 – 10.06 <0.001
STIM linear 29.91 27.05 – 32.77 <0.001
STIM quadratic 0.83 -0.36 – 2.02 0.171
CUE high gt low × STIM
linear
-0.52 -3.24 – 2.20 0.709
CUE high gt low × STIM
quadratic
-3.76 -6.14 – -1.39 0.002
Random Effects
σ2 401.05
τ00 sub 814.63
τ11 sub.CUE_high_gt_low 59.51
τ11 sub.STIM_linear 146.40
ρ01 -0.03
0.40
ICC 0.68
N sub 92
Observations 5014
Marginal R2 / Conditional R2 0.117 / 0.718
# Render the fitted behavioral model as a LaTeX equation
equatiomatic::extract_eq(model.beh)

\[ \begin{aligned} \operatorname{outcomerating\_impute}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1j[i]}(\operatorname{CUE\_high\_gt\_low}) + \beta_{2j[i]}(\operatorname{STIM\_linear}) + \beta_{3}(\operatorname{STIM\_quadratic}) + \beta_{4}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_linear}) + \beta_{5}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_quadratic}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{1j} \\ &\beta_{2j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{1j}} \\ &\mu_{\beta_{2j}} \end{aligned} \end{array} \right) , \left( \begin{array}{ccc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{1j}} & \rho_{\alpha_{j}\beta_{2j}} \\ \rho_{\beta_{1j}\alpha_{j}} & \sigma^2_{\beta_{1j}} & \rho_{\beta_{1j}\beta_{2j}} \\ \rho_{\beta_{2j}\alpha_{j}} & \rho_{\beta_{2j}\beta_{1j}} & \sigma^2_{\beta_{2j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

Beh lineplot

# Behavioral line plot: outcome rating by stimulus intensity and cue _________
dv <- "outcomerating_impute"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
# Human-readable labels plus ordered factors so the x-axis runs Low -> Med -> High
df_filter$cue_name <- NA; df_filter$stim_name <- NA
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
# DATA$levels_ordered <- factor(DATA$param_stimulus_type, levels=c("low", "med", "high"))
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Per-subject cell means, then within-subject SEs across subjects.
# NOTE(review): meanSummary / summarySEwithin are external helpers (presumably
# cueR / Rmisc) — confirm their source and the "mean_per_sub" column contract.
stimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
stimcue_groupwise <- summarySEwithin(
  data = stimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
stimcue_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
# NOTE(review): ggtitle/title/ylim/plot_savefname set below are not used by the
# plots in this chunk (leftovers from a raincloud-plot template); the variable
# names also shadow ggplot2/base functions of the same name.
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "Outcome rating"
ylim <- c(-10, 60)
dv_keyword <- "outcomerating"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only panel (note: `subset` shadows base::subset in this scope)
subset <- stimcue_groupwise[stimcue_groupwise$cue_ordered == "low cue",]
g <- plot_lineplot_twofactor(subset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = "Within pain task: Outcome rating",
                        xlab = "Stimulus intensity", ylab = "Outcome rating")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
   ylim(42, 87)
ggsave(file.path(analysis_dir, paste0("beh_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)


# Both-cue behavioral line plot via the cueR package helper
k <- cueR::plot_lineplot_twofactor_subset(stimcue_groupwise,
                                    taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("Beh N =", length(unique(stimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "Outcome rating")
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
# Enlarge fonts, fix a square aspect ratio, and clamp the y-axis to the same
# range as the low-cue-only panel (42-87)
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
 ylim(42, 87)
k

# Save as SVG for the dissertation figure
ggsave(file.path(analysis_dir, paste0("beh_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)

2 NPS (df_filter)

NPS linear analysis

# NPS model: same fixed/random structure as the behavioral model.
# NOTE(review): this fit is singular (see the boundary warning below) — the
# random-effects covariance is degenerate; consider simplifying the random
# slopes before interpreting the variance components.
model.NPS <- lmer(NPS ~ CUE_high_gt_low*STIM_linear +
       CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low+STIM_linear|sub), data = df_filter)
boundary (singular) fit: see help('isSingular')
# Fixed- and random-effects summary (fit is singular; interpret cautiously)
summary(model.NPS)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: NPS ~ CUE_high_gt_low * STIM_linear + CUE_high_gt_low * STIM_quadratic +      (CUE_high_gt_low + STIM_linear | sub)
   Data: df_filter

REML criterion at convergence: 28051.3

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.4553 -0.5252 -0.0012  0.5343  4.7549 

Random effects:
 Groups   Name            Variance Std.Dev. Corr       
 sub      (Intercept)      3.81734 1.9538              
          CUE_high_gt_low  0.04399 0.2097   -0.05      
          STIM_linear      0.20085 0.4482    0.84  0.49
 Residual                 14.71951 3.8366              
Number of obs: 5029, groups:  sub, 92

Fixed effects:
                                Estimate Std. Error        df t value Pr(>|t|)    
(Intercept)                       5.1782     0.2120   91.2599  24.430  < 2e-16 ***
CUE_high_gt_low                  -0.1654     0.1107  203.6781  -1.495   0.1365    
STIM_linear                       1.2624     0.1409  191.0483   8.958 2.96e-16 ***
STIM_quadratic                    0.2813     0.1159 4858.4992   2.427   0.0152 *  
CUE_high_gt_low:STIM_linear      -0.2640     0.2653 4860.5313  -0.995   0.3196    
CUE_high_gt_low:STIM_quadratic   -0.3100     0.2318 4858.7496  -1.338   0.1810    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
                     (Intr) CUE_h__ STIM_l STIM_q CUE_hgh_gt_lw:STIM_l
CUE_hgh_gt_          -0.011                                           
STIM_linear           0.272  0.041                                    
STIM_qudrtc           0.000 -0.001  -0.001                            
CUE_hgh_gt_lw:STIM_l  0.002  0.002   0.002 -0.004                     
CUE_hgh_gt_lw:STIM_q  0.000 -0.002  -0.004  0.000 -0.002              
optimizer (nloptwrap) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# HTML summary table of the NPS model (sjPlot)
sjPlot::tab_model(model.NPS,
                  title = "Multilevel-modeling: \nlmer(NPS ~ CUE * STIM + (CUE + STIM | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(NPS ~ CUE * STIM + (CUE + STIM | sub), data = pain)
  NPS
Predictors Estimates CI p
(Intercept) 5.18 4.76 – 5.59 <0.001
CUE high gt low -0.17 -0.38 – 0.05 0.135
STIM linear 1.26 0.99 – 1.54 <0.001
STIM quadratic 0.28 0.05 – 0.51 0.015
CUE high gt low × STIM
linear
-0.26 -0.78 – 0.26 0.320
CUE high gt low × STIM
quadratic
-0.31 -0.76 – 0.14 0.181
Random Effects
σ2 14.72
τ00 sub 3.82
τ11 sub.CUE_high_gt_low 0.04
τ11 sub.STIM_linear 0.20
ρ01 -0.05
0.84
ICC 0.21
N sub 92
Observations 5029
Marginal R2 / Conditional R2 0.016 / 0.220
# Render the fitted NPS model as a LaTeX equation
equatiomatic::extract_eq(model.NPS)

\[ \begin{aligned} \operatorname{NPS}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1j[i]}(\operatorname{CUE\_high\_gt\_low}) + \beta_{2j[i]}(\operatorname{STIM\_linear}) + \beta_{3}(\operatorname{STIM\_quadratic}) + \beta_{4}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_linear}) + \beta_{5}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_quadratic}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{1j} \\ &\beta_{2j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{1j}} \\ &\mu_{\beta_{2j}} \end{aligned} \end{array} \right) , \left( \begin{array}{ccc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{1j}} & \rho_{\alpha_{j}\beta_{2j}} \\ \rho_{\beta_{1j}\alpha_{j}} & \sigma^2_{\beta_{1j}} & \rho_{\beta_{1j}\beta_{2j}} \\ \rho_{\beta_{2j}\alpha_{j}} & \rho_{\beta_{2j}\beta_{1j}} & \sigma^2_{\beta_{2j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

# Persist the NPS model summary to a text file. capture.output() is safer than
# the previous sink()/sink(NULL) pair: an error inside summary() cannot leave
# the sink open and silently swallow all subsequent console output.
writeLines(capture.output(summary(model.NPS)),
           "/Users/h/Desktop/NPS_output.txt")

lineplot

# NPS line plot: signature score by stimulus intensity and cue _______________
dv <- "NPS"
taskname <- "pain"
subject <- "sub"

# Plot 1: reordering variables for plots _______________________________________
# Relabel + reorder factors (cue_name/stim_name columns already exist from the
# behavioral plotting chunk; this repeats that recode so the chunk is standalone)
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
# DATA$levels_ordered <- factor(DATA$param_stimulus_type, levels=c("low", "med", "high"))
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Per-subject cell means of NPS, then within-subject SEs across subjects
NPSstimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
NPSstimcue_groupwise <- summarySEwithin(
  data = NPSstimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
NPSstimcue_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
# NOTE(review): ggtitle/title/ylim/plot_savefname below are unused leftovers
# from a raincloud-plot template
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "NPS"
ylim <- c(-10, 60)
dv_keyword <- "nps"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only panel, y-axis fixed to 3-6
NPSsubset <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue",]
g <- plot_lineplot_twofactor(NPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = paste0("NPS N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPS")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
   ylim( 3,6)
ggsave(file.path(analysis_dir, paste0("NPS_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)


# Both-cue NPS line plot via the cueR package helper
k <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise, taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("NPS N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPS")
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
# Enlarge fonts, fix a square aspect ratio, and clamp the y-axis to 4-6
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
  ylim( 4,6)
k

# Save as SVG for the dissertation figure
ggsave(file.path(analysis_dir, paste0("NPS_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)

NPS pos (df_filter)

# NPS positive-weight subscore line plot _____________________________________
# NOTE(review): dv is "npspos" but the y-axis label and title below still say
# "NPS" — confirm whether the labels should read "NPSpos".
dv <- "npspos"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
# Relabel + reorder factors (repeats the recode from earlier chunks)
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
# DATA$levels_ordered <- factor(DATA$param_stimulus_type, levels=c("low", "med", "high"))
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Per-subject cell means of npspos, then within-subject SEs across subjects
NPSstimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
NPSstimcue_groupwise <- summarySEwithin(
  data = NPSstimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
NPSstimcue_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
# NOTE(review): ggtitle/title/ylim/plot_savefname below are unused leftovers
# from a raincloud-plot template
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "NPSpos"
ylim <- c(-10, 60)
dv_keyword <- "nps"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only panel. NOTE(review): ylim(3, 6) clips some points here — see the
# "Removed 3 rows" warnings after this chunk; widen the limits if unintended.
NPSsubset <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue",]
g <- plot_lineplot_twofactor(NPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = paste0("NPS N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPS")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
   ylim( 3,6)
ggsave(file.path(analysis_dir, paste0("NPSpos_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)
Warning: Removed 3 rows containing missing values or values outside the scale range (`geom_line()`).
Warning: Removed 3 rows containing missing values or values outside the scale range (`geom_point()`).
Removed 3 rows containing missing values or values outside the scale range (`geom_point()`).
# Both-cue NPSpos line plot via the cueR package helper
k <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise,taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("NPS (pos) N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPS")
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
# Enlarge fonts / fix aspect ratio; no y-limit here (unlike the low-cue panel)
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2)
k

# Save as SVG for the dissertation figure
ggsave(file.path(analysis_dir, paste0("NPSpos_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)

NPS neg

# NPS negative-weight subscore line plot _____________________________________
dv <- "npsneg"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
# Relabel + reorder factors (repeats the recode from earlier chunks)
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
# DATA$levels_ordered <- factor(DATA$param_stimulus_type, levels=c("low", "med", "high"))
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Per-subject cell means of npsneg, then within-subject SEs across subjects
NPSstimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
NPSstimcue_groupwise <- summarySEwithin(
  data = NPSstimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
NPSstimcue_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
# NOTE(review): ggtitle/title/ylim/plot_savefname below are unused leftovers
# from a raincloud-plot template
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "NPSneg"
ylim <- c(-10, 60)
dv_keyword <- "nps"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only panel. NOTE(review): ylim(3, 6) clips some npsneg points — see
# the "Removed 3 rows" warnings after this chunk; widen the limits if unintended.
NPSsubset <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue",]
g <- plot_lineplot_twofactor(NPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = paste0("NPS N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPSneg")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
   ylim( 3,6)
ggsave(file.path(analysis_dir, paste0("NPSneg_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)
Warning: Removed 3 rows containing missing values or values outside the scale range (`geom_line()`).
Warning: Removed 3 rows containing missing values or values outside the scale range (`geom_point()`).
Removed 3 rows containing missing values or values outside the scale range (`geom_point()`).
# Full two-factor lineplot (both cue levels) via the cueR package helper;
# title reports the number of unique subjects entering the summary.
k <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise, taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("NPSneg N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPSneg")
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
# Apply the shared figure theme (square aspect, enlarged fonts), render the
# plot, and save a date-stamped SVG.
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2)
k

ggsave(file.path(analysis_dir, paste0("NPSneg_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)

3 PE (PEmerge)

PEdf <- read.csv("/Users/h/Documents/projects_local/cue_expectancy/data/RL/modelfit_072024/table_pain.csv")


# Normalize single-trial filenames so they match the join key used elsewhere:
# drop the redundant "_cue" suffix after the cuetype value and the redundant
# "_stim_stim" suffix after the stimintensity value.
#
# @param filename Character vector of filenames (vectorized).
# @return Character vector of the same length with the suffixes removed.
clean_filename <- function(filename) {
  # pattern -> replacement pairs, applied in order
  rules <- c(
    "(cuetype-[^_]+)_cue"             = "\\1",  # "cuetype-high_cue"   -> "cuetype-high"
    "(stimintensity-[^_]+)_stim_stim" = "\\1"   # "...-low_stim_stim"  -> "...-low"
  )
  cleaned <- filename
  for (pat in names(rules)) {
    cleaned <- gsub(pat, rules[[pat]], cleaned)
  }
  cleaned
}

# Apply the function to the singletrial_fname column
# NOTE(review): sapply() is unnecessary here — gsub() is vectorized, so
# clean_filename(PEdf$singletrial_fname) would give the same result faster.
PEdf$singletrial_fname <- sapply(PEdf$singletrial_fname, clean_filename)
# Inner join: keeps only trials present in BOTH the NPS table and the RL fits
# (4,937 of the filtered trials, per the printout below).
PEmerge<- inner_join(df_filter, PEdf, by = "singletrial_fname")
#
# PEmerge <- merge(dfmerge, PEdf, by = c("src_subject_id", "session_id", "param_run_num", "trial_index"))
print(PEmerge)
# A tibble: 4,937 × 133
   trial_index.x cue      stimulusintensity singletrial_fname   sub   ses.x run   runtype.x event trial cuetype stimintensity expectrating.x
           <dbl> <chr>    <chr>             <chr>               <chr> <chr> <chr> <chr>     <chr> <chr> <chr>   <chr>         <chr>         
 1             1 low_cue  med_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 000   low     med           n/a           
 2             2 high_cue low_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 001   high    low           116.36        
 3             3 high_cue high_stim         sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 002   high    high          38.16         
 4             4 low_cue  med_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 003   low     med           34.39         
 5             5 high_cue low_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 004   high    low           80.5          
 6             6 low_cue  high_stim         sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 005   low     high          7.53          
 7             7 low_cue  low_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 006   low     low           29.77         
 8             8 high_cue med_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 007   high    med           54.46         
 9             9 low_cue  high_stim         sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 008   low     high          26.26         
10            10 high_cue med_stim          sub-0005_ses-03_ru… sub-… ses-… run-… pain      stim… 009   high    med           65.56         
# ℹ 4,927 more rows
# ℹ 120 more variables: expectlabel.x <chr>, outcomerating.x <chr>, outcomelabel.x <chr>, expectrating_impute <dbl>,
#   expectlabel_impute.x <chr>, outcomerating_impute <dbl>, outcomelabel_impute.x <chr>, expectrating_RT.x <chr>,
#   expectrating_RTimpute.x <chr>, outcomerating_RT.x <chr>, outcomerating_RTimpute.x <chr>, pain_stimulus_delivery_success.x <chr>,
#   X.x <int>, file_path.x <int>, nps <dbl>, nps_corr <dbl>, nps_cosine <dbl>, npspos <dbl>, npspos_corr <dbl>, npspos_cosine <dbl>,
#   npsneg <dbl>, npsneg_corr <dbl>, npsneg_cosine <dbl>, pos_nps_vermis <dbl>, pos_nps_vermis_corr <dbl>, pos_nps_vermis_cosine <dbl>,
#   pos_nps_rIns <dbl>, pos_nps_rIns_corr <dbl>, pos_nps_rIns_cosine <dbl>, pos_nps_rV1 <dbl>, pos_nps_rV1_corr <dbl>, …
# Does the model-derived prediction error predict trial-level NPS?
# Random intercept per subject only; nmkbw optimizer chosen to aid convergence.
PE.model <- lmer(nps ~ PE_mdl2 + (1 | sub), data=PEmerge, control = lmerControl(optimizer = "nmkbw"))
summary(PE.model)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: nps ~ PE_mdl2 + (1 | sub)
   Data: PEmerge
Control: lmerControl(optimizer = "nmkbw")

REML criterion at convergence: 27628.2

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.1811 -0.5308 -0.0090  0.5348  4.8091 

Random effects:
 Groups   Name        Variance Std.Dev.
 sub      (Intercept)  3.874   1.968   
 Residual             15.008   3.874   
Number of obs: 4937, groups:  sub, 88

Fixed effects:
             Estimate Std. Error        df t value Pr(>|t|)    
(Intercept) 5.140e+00  2.182e-01 8.715e+01  23.552  < 2e-16 ***
PE_mdl2     1.659e-02  2.346e-03 4.904e+03   7.071 1.76e-12 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
        (Intr)
PE_mdl2 -0.048
# HTML regression table for the PE model.
# NOTE(review): the title states a random slope "(PE | sub)" but the fitted
# model above is random-intercept-only "(1 | sub)" — update one or the other.
sjPlot::tab_model(PE.model,
                  title = "Multilevel-modeling: \nlmer(NPS ~ PE + (PE | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(NPS ~ PE + (PE | sub), data = pain)
  nps
Predictors Estimates CI p
(Intercept) 5.14 4.71 – 5.57 <0.001
PE mdl2 0.02 0.01 – 0.02 <0.001
Random Effects
σ2 15.01
τ00 sub 3.87
ICC 0.21
N sub 88
Observations 4937
Marginal R2 / Conditional R2 0.009 / 0.212
equatiomatic::extract_eq(PE.model)

\[ \begin{aligned} \operatorname{nps}_{i} &\sim N \left(\alpha_{j[i]} + \beta_{1}(\operatorname{PE\_mdl2}), \sigma^2 \right) \\ \alpha_{j} &\sim N \left(\mu_{\alpha_{j}}, \sigma^2_{\alpha_{j}} \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

sink("/Users/h/Desktop/PEnps_output.txt")  # Redirect console output to PEnps_output.txt
summary(PE.model)
sink(NULL)  # Restore normal console output

library(ggplot2)

# Axis limits (currently unused — the ylim/xlim lines below are commented out).
min_value <- -40
max_value <- 40
PEmerge$sub <- factor(PEmerge$sub)
# Scatter of trial-level NPS vs prediction error with per-subject regression
# lines (dashed, colored) and a group-level regression line (solid black).
# NOTE(review): `y = NPS` — the merged table prints a lowercase `nps` column;
# confirm an uppercase `NPS` column exists (later lmer formulas also use NPS),
# otherwise this aes() will fail at render time.
# NOTE(review): theme_classic2() comes from ggpubr — presumably attached in an
# earlier chunk; verify.
plot.PE_NPS <- ggplot(PEmerge, aes(x = PE_mdl2, y = NPS)) +
  geom_point(aes(colour = sub), size = .1) +  # Points colored by subject
  geom_smooth(aes(colour = sub), method = 'lm', formula = y ~ x, se = FALSE, size = .3, linetype = "dashed") +  # Subject-wise regression lines
  geom_smooth(method = 'lm', formula = y ~ x, se = FALSE, size = .5, color = "black") +  # Group regression line
  # ylim(min_value, max_value) +  # Set y-axis limits
  # xlim(min_value, max_value)
  theme_classic2()  +# Use a theme with a white background
xlab("PE") +
   theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 24, ), # Axis titles
          axis.text = element_text(size = 18), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5),# Plot title
         legend.position = "none"
          )
plot.PE_NPS

ggsave("/Users/h/Documents/projects_local/cue_expectancy/resources/plots_dissertation/ch4/ch4_NPSPE.png",plot.PE_NPS, w=5, h=4, dpi=300)

Q. How correlated are the outcome ratings and NPS values? TODO: indicate which correlations are statistically significant on the plot.

library(corrplot)
# Numeric recode of cue (factor levels -> 1/2) so it can enter the correlation
# matrix; behavioral PE = outcome rating minus expectation rating.
PEmerge$cuetype_numeric <- as.numeric(as.factor(PEmerge$cuetype))
PEmerge$beh_PE <- PEmerge$outcomerating_impute - PEmerge$expectrating_impute
# NOTE(review): `subset` shadows base::subset() — harmless here but consider a
# different name (e.g. corr_cols).
subset <- PEmerge[, c( "outcomerating_impute", "expectrating_impute", "PE_mdl2","beh_PE", "nps", "npspos",  "npsneg", "cuetype_numeric") ]
# Pairwise-complete correlations tolerate the NA ratings seen in the printout.
cor_matrix <- cor(subset, use = "pairwise.complete.obs")
corrplot(cor_matrix, method = "circle")

# Correlation between cuetype and model-based PE
correlation_model <- cor_matrix["cuetype_numeric", "PE_mdl2"]
print(correlation_model)
[1] 0.611056
# Correlation between cuetype and behavioral PE
# (quantifies cue/PE collinearity, relevant to the singular-fit discussion below)
correlation_behavioral <- cor_matrix["cuetype_numeric", "beh_PE"]
print(correlation_behavioral)
[1] 0.4052953

Tor suggestion lmer with PE

What to do when the model fit is singular: https://stackoverflow.com/questions/54597496/how-to-cope-with-a-singular-fit-in-a-linear-mixed-model-lme4. A singular fit means that (1) one or more random-effect variances are estimated at or near zero, and (2) the model is overfitting or overparameterized (it cannot estimate all the random slopes reliably). Suggestion: simplify the random-effects structure.

Q. Why would I have a singular random-effects structure? A1. The model fits too many parameters: too many random slopes relative to the amount of data. A2. The predictors are highly collinear (PE is similar to cue).

NPS positive weights are predicted by cue and stimulus intensity. Once PE enters the model, the random-effects covariance matrix becomes singular.

summary(NPSPE)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: NPS ~ STIM_linear + STIM_quadratic + PE_mdl2 + (1 + PE_mdl2 |      sub)
   Data: PEmerge

REML criterion at convergence: 27580.9

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.3591 -0.5302 -0.0024  0.5372  4.7212 

Random effects:
 Groups   Name        Variance  Std.Dev.  Corr 
 sub      (Intercept) 3.918e+00 1.9795146      
          PE_mdl2     2.835e-07 0.0005324 -1.00
 Residual             1.485e+01 3.8539140      
Number of obs: 4937, groups:  sub, 88

Fixed effects:
                Estimate Std. Error        df t value Pr(>|t|)    
(Intercept)    5.183e+00  2.194e-01 8.666e+01  23.620  < 2e-16 ***
STIM_linear    1.058e+00  1.544e-01 4.842e+03   6.854  8.1e-12 ***
STIM_quadratic 2.661e-01  1.175e-01 4.846e+03   2.265  0.02358 *  
PE_mdl2        7.495e-03  2.680e-03 4.855e+03   2.796  0.00519 ** 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
            (Intr) STIM_l STIM_q
STIM_linear  0.028              
STIM_qudrtc  0.001  0.014       
PE_mdl2     -0.076 -0.491 -0.031
optimizer (nloptwrap) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
#
# model.NPS <- lmer(NPS ~ CUE_high_gt_low*STIM_linear +
#        CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low+STIM_linear|sub), data = df_filter)

# NPS ~ stimulus-intensity contrasts + PE, with a random PE slope per subject.
# The fit is singular (see warning below): the PE slope variance is ~0 and its
# correlation with the intercept is -1, i.e. the random slope is unsupported.
NPSPE <- lmer(NPS ~ STIM_linear +
       STIM_quadratic  + PE_mdl2 + (1 + PE_mdl2   |sub),
     data = PEmerge)
boundary (singular) fit: see help('isSingular')
     # control=lmerControl(isSingular(NPSPE, tol = 1e-4)))
       # check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
# Display the (singular) fit; fixed effects remain interpretable.
summary(NPSPE)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: NPS ~ STIM_linear + STIM_quadratic + PE_mdl2 + (1 + PE_mdl2 |      sub)
   Data: PEmerge

REML criterion at convergence: 27580.9

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-7.3591 -0.5302 -0.0024  0.5372  4.7212 

Random effects:
 Groups   Name        Variance  Std.Dev.  Corr 
 sub      (Intercept) 3.918e+00 1.9795146      
          PE_mdl2     2.835e-07 0.0005324 -1.00
 Residual             1.485e+01 3.8539140      
Number of obs: 4937, groups:  sub, 88

Fixed effects:
                Estimate Std. Error        df t value Pr(>|t|)    
(Intercept)    5.183e+00  2.194e-01 8.666e+01  23.620  < 2e-16 ***
STIM_linear    1.058e+00  1.544e-01 4.842e+03   6.854  8.1e-12 ***
STIM_quadratic 2.661e-01  1.175e-01 4.846e+03   2.265  0.02358 *  
PE_mdl2        7.495e-03  2.680e-03 4.855e+03   2.796  0.00519 ** 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
            (Intr) STIM_l STIM_q
STIM_linear  0.028              
STIM_qudrtc  0.001  0.014       
PE_mdl2     -0.076 -0.491 -0.031
optimizer (nloptwrap) convergence code: 0 (OK)
boundary (singular) fit: see help('isSingular')
# HTML table for the NPSPE model.
# NOTE(review): the title says "cue + stim + PE + (cue + stim | sub)" but the
# fitted model is STIM_linear + STIM_quadratic + PE_mdl2 + (1 + PE_mdl2 | sub)
# — the title should be corrected to match.
sjPlot::tab_model(NPSPE,
                  title = "model 2: \nlmer(NPS ~ cue + stim + PE + (cue + stim | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
model 2: lmer(NPS ~ cue + stim + PE + (cue + stim | sub), data = pain)
  NPS
Predictors Estimates CI p
(Intercept) 5.18 4.75 – 5.61 <0.001
STIM linear 1.06 0.76 – 1.36 <0.001
STIM quadratic 0.27 0.04 – 0.50 0.024
PE mdl2 0.01 0.00 – 0.01 0.005
Random Effects
σ2 14.85
τ00 sub 3.92
τ11 sub.PE_mdl2 0.00
ρ01 sub -1.00
N sub 88
Observations 4937
Marginal R2 / Conditional R2 0.021 / NA
equatiomatic::extract_eq(NPSPE)

\[ \begin{aligned} \operatorname{NPS}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1}(\operatorname{STIM\_linear}) + \beta_{2}(\operatorname{STIM\_quadratic}) + \beta_{3j[i]}(\operatorname{PE\_mdl2}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{3j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{3j}} \end{aligned} \end{array} \right) , \left( \begin{array}{cc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{3j}} \\ \rho_{\beta_{3j}\alpha_{j}} & \sigma^2_{\beta_{3j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

library(sjPlot)

# Plot marginal effect (partial slope) for PE
library(ggplot2)

# Fitted (conditional) values including the subject random effects.
PEmerge$predicted_NPS <- predict(NPSPE)
# ggplot(PEmerge, aes(x = PE_mdl2, y = predicted_NPS, color = sub)) +
#   geom_line(aes(group = sub), alpha = 0.4) +
#   labs(title = "Subject-specific predicted slopes for Prediction Error",
#        x = "Prediction Error (PE_mdl2)",
#        y = "Predicted NPS") +
#   theme_minimal(base_size = 14) +
#   theme(legend.position = "none")
library(dplyr)

# Sort within subject by PE so geom_line draws each subject's trajectory
# left-to-right instead of zig-zagging in trial order.
PEmerge_sorted <- PEmerge %>%
  arrange(sub, PE_mdl2)

# One line per subject; slopes are near-identical because the random PE slope
# variance was estimated at ~0 (singular fit above).
ggplot(PEmerge_sorted, aes(x = PE_mdl2, y = predicted_NPS, color = sub)) +
  geom_line(aes(group = sub), alpha = 0.4) +
  labs(title = "Subject-specific predicted slopes for Prediction Error",
       x = "Prediction Error (PE_mdl2)",
       y = "Predicted NPS") +
  theme_minimal(base_size = 14) +
  theme(legend.position = "none")

library(ggeffects)
library(ggplot2)

# Marginal (fixed-effect) predictions for PE across its observed range,
# holding the other predictors at reference values.
pe_pred <- ggpredict(NPSPE, terms = "PE_mdl2")

# Group-level partial effect with confidence ribbon.
ggplot(pe_pred, aes(x = x, y = predicted)) +
  geom_line() +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = 0.2) +
  labs(title = "Partial Effect of Prediction Error on NPS",
       x = "Prediction Error (PE_mdl2)",
       y = "Predicted NPS") +
  theme_minimal(base_size = 14)

# Joint marginal-effect plot for stimulus intensity at levels of PE.
plot_model(NPSPE, type = "eff", terms = c( "STIM_linear","PE_mdl2"))

# # model 2: NPS PE positive values
# NPSPEpos <- lmer(npspos~ cuetype + stimintensity + PE_mdl2 + (cuetype + stimintensity|sub),
#      data = PEmerge,
#      control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
# summary(NPSPEpos)
# vif(NPSPEpos)
# sjPlot::tab_model(NPSPEpos,
#                   title = "model 2: \nlmer(NPSpos ~ cue + stim + PE + (cue + stim | sub), data = pain)",
#                   CSS = list(css.table = '+font-size: 12;'))
# equatiomatic::extract_eq(NPSPEpos)

4 NPS nociceptive ROI (df_filter.NA)

# STEP 0. filter data __________________________________________________________
# Make sure that each condition cell has adequate amount of trials
# Signature subscores to iterate over: whole-pattern NPS, its positive/negative
# weight maps, and the individual positive- and negative-weight ROIs.
dependent_vars <- c("nps","npspos","npsneg",
                    "pos_nps_vermis","pos_nps_rIns","pos_nps_rV1","pos_nps_rThal",
                    "pos_nps_lIns","pos_nps_rdpIns","pos_nps_rS2_Op","pos_nps_dACC",
                    "neg_nps_rLOC","neg_nps_lLOC","neg_nps_rpLOC","neg_nps_pgACC","neg_nps_lSTS","neg_nps_rIPL","neg_nps_PCC"  )
# Human-readable plot titles, positionally matched to dependent_vars.
# NOTE(review): the negative-weight ROIs reuse raw variable names as display
# labels — probably unintended; consider "rLOC", "lLOC", ... for consistency.
newlist <- c("NPS","NPS positive weights","NPS negative weights",
                    "Vermis","rIns","rV1","rThal",
                    "lIns","rdpIns","rS2 & Op","dACC",
                    "neg_nps_rLOC","neg_nps_lLOC","neg_nps_rpLOC","neg_nps_pgACC","neg_nps_lSTS","neg_nps_rIPL","neg_nps_PCC"  )

# Initialize a DataFrame to collect combined results
# (accumulates one summarySEwithin table per DV inside the loop below).
combined_se_calc_cooksd <- data.frame()
# For each NPS-derived DV: filter inadequately-sampled subjects, fit the
# cue x stimulus LMM, compare optimizers, compute within-subject summary
# statistics, and save a two-factor lineplot. Group-level summaries accumulate
# across DVs into `combined_se_calc_cooksd` (initialized once, above the loop).
for (i in seq_along(dependent_vars)) {

  dv <- dependent_vars[i]
  dv_title <- newlist[i]
  print(paste("_______", i, dv, "_______"))

  # Drop subjects with fewer than 3 trials in any cue x stimulus cell so every
  # condition mean is estimable for every retained subject.
  subjects_with_inadequate_data <- data %>%
    group_by(sub, CUE_high_gt_low, STIM_linear) %>% #SES_linear,
    dplyr::summarise(count = n(), .groups = 'drop') %>%
    filter(count < 3) %>%
    distinct(sub) %>%
    pull(sub)
  df_filter <- data %>%
    filter(!(sub %in% subjects_with_inadequate_data))

  print(sprintf("after filtering out subjects that have less than 3 trials in cell, we have N=%d -> N=%d", length(unique(data$sub)), length(unique(df_filter$sub))))

  ## QC. check NPS distribution ________________________________________________
  df_filter.NA <- df_filter %>% filter(!is.na(nps))  # Remove NA values
  head(df_filter.NA)

  # BUGFIX: the accumulator `combined_se_calc_cooksd` was previously
  # re-initialized to data.frame() HERE, inside the loop, which wiped all
  # earlier iterations so the rbind() below only ever held the current DV.
  # It is now initialized once before the loop and only appended to here.

  taskname <- "pain"
  ggtitle <- paste(taskname, " - NPS (degree)")
  title <- paste(taskname, " - actual")
  subject <- "sub"
  w <- 10
  h <- 6

  # Date-stamped output directory (created if absent).
  analysis_dir <-
    file.path(main_dir,
              "analysis",
              "mixedeffect",
              analysis_folder,
              as.character(Sys.Date())) # nolint
  dir.create(analysis_dir,
             showWarnings = FALSE,
             recursive = TRUE)
  savedir <- analysis_dir

  # Per-subject z-score of NPS. as.numeric() strips the 1-column matrix that
  # scale() returns, which would otherwise embed a matrix column in the tibble.
  df_filter.NA <- df_filter.NA %>%
    group_by(sub) %>%
    mutate(NPS_scaled = as.numeric(scale(nps))) %>%
    ungroup()

  # 1. rename/reorder factor levels for plots __________________________________
  df_filter.NA$cue_name <- NA
  df_filter.NA$stim_name <- NA
  df_filter.NA$cue_name[df_filter.NA$cue == "high_cue"] <- "high cue"
  df_filter.NA$cue_name[df_filter.NA$cue == "low_cue"] <- "low cue"

  df_filter.NA$stim_name[df_filter.NA$stimulusintensity == "high_stim"] <- "High"
  df_filter.NA$stim_name[df_filter.NA$stimulusintensity == "med_stim"] <- "Med"
  df_filter.NA$stim_name[df_filter.NA$stimulusintensity == "low_stim"] <- "Low"

  df_filter.NA$stim_ordered <- factor(df_filter.NA$stim_name, levels = c("Low", "Med", "High"))
  df_filter.NA$cue_ordered <- factor(df_filter.NA$cue_name, levels = c("low cue", "high cue"))
  model_iv1 <- "stim_ordered"
  model_iv2 <- "cue_ordered"

  # 2. lmer model ______________________________________________________________
  model_savefname <- file.path(
    analysis_dir,
    paste(
      "lmer_nps-", dv, "_", as.character(Sys.Date()), ".txt",
      sep = ""
    )
  )
  # DV varies per iteration, so the formula is built as a string.
  formula_string <- paste(dv, "~ CUE_high_gt_low * STIM_linear + CUE_high_gt_low * STIM_quadratic + (CUE_high_gt_low + STIM_linear | sub)")
  model_formula <- as.formula(formula_string)
  # Fit the model; its summary is redirected into model_savefname.
  # This control parameter, which ignores the singular-fit warning, allows a
  # zero variance estimate. Proceed with caution:
  # https://stackoverflow.com/questions/54597496/how-to-cope-with-a-singular-fit-in-a-linear-mixed-model-lme4
  sink(model_savefname)
  model.npscuestim <- lmer(model_formula, data = df_filter.NA,
                           control = lmerControl(check.conv.singular = .makeCC(action = "ignore", tol = 1e-4)))
                           # control = lmerControl(optimizer = "nmkbw"))
  print(summary(model.npscuestim))
  sink()

  # Check why convergence is negative: refit with every available optimizer and
  # compare relative negative log-likelihoods (near-zero spread => benign).
  aa <- lme4::allFit(model.npscuestim)
  opt.summary <- glance(aa) |> select(optimizer, AIC, NLL_rel) |> arrange(NLL_rel)
  print(opt.summary)

  # R2 _________________________________________________________________________
  r_squared <- MuMIn::r.squaredGLMM(model.npscuestim)
  R2m <- r_squared[1] # Marginal R-squared (fixed effects only)
  R2c <- r_squared[2] # Conditional R-squared (fixed + random effects)

  # Summary statistics: per-subject condition means, then group means with
  # within-subject SEs ________________________________________________________
  NPSstimcue_subjectwise <- meanSummary(df_filter.NA,
                                        c(subject, model_iv1, model_iv2), dv)
  NPSstimcue_groupwise <- summarySEwithin(
    data = NPSstimcue_subjectwise,
    measurevar = "mean_per_sub",
    withinvars = c(model_iv1, model_iv2),
    idvar = subject
  )
  NPSstimcue_groupwise$task <- taskname
  # https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

  combined_se_calc_cooksd <-
    rbind(combined_se_calc_cooksd, NPSstimcue_groupwise)

  # plot parameters ____________________________________________________________
  sub_mean <- "mean_per_sub"
  group_mean <- "mean_per_sub_norm_mean"
  se <- "se"
  subject <- "sub"
  ggtitle <- paste(taskname, " - NPS Cooksd removed")
  title <- paste(taskname, " - NPS")
  xlab <- ""
  ylab <- "NPS (degree)"
  ylim <- c(-1, 1)
  dv_keyword <- "NPS"
  # With dv_keyword = "NPS" this always takes the else branch; the "expect"
  # branch is retained for parity with the behavioral-rating chunks.
  if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
    color <- c("#1B9E77", "#D95F02")
  } else {
    color <- c("#4274AD", "#C5263A")
  } # if keyword starts with
  plot_savefname <- file.path(
    analysis_dir,
    paste(
      "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), "_cooksd.png",
      sep = ""
    )
  )

  ### Lineplot with only the low-cue condition _________________________________
  subsetNPS <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue", ]
  g <- cueR::plot_lineplot_twofactor_subset(subsetNPS, taskname = "pain",
                          iv1 = "stim_ordered", iv2 = "cue_ordered",
                          mean = "mean_per_sub_norm_mean", error = "se",
                          color = c("low cue" = "#4274AD", "#D73027"),
                          ggtitle = dv_title,
                          xlab = "Stimulus intensity", ylab = "Mean score")
  # NOTE: inside a for loop this expression is evaluated but not auto-printed.
  g + theme(aspect.ratio = .8)

  ### Lineplot with both cue conditions ________________________________________
  g <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise, taskname = "pain",
                          iv1 = "stim_ordered", iv2 = "cue_ordered",
                          mean = "mean_per_sub_norm_mean", error = "se",
                          color = c("low cue" = "#4274AD", "high cue" = "#C5263A"),
                          ggtitle = dv_title,
                          xlab = "Stimulus intensity", ylab = "Mean score")
  g <- g + theme(aspect.ratio = 1,
            text = element_text(size = 18), # Default text size for the plot
            axis.title = element_text(size = 22), # Axis titles
            axis.text = element_text(size = 15), # Axis text (x and y)
            plot.title = element_text(size = 24, hjust = 0.5) # Plot title
            ) +
    # geom_line(size = 1) + # Adjust line thickness
    geom_point(size = 2)  # Adjust point size

  plot_filename <- file.path(analysis_dir,
    paste('lineplot_task-all_rating-', dv_keyword, dv, '.svg', sep = ""))

  print(g)

  ggsave(plot_filename, g, width = 8, height = 4, dpi = 300)
}
[1] "_______ 1 nps _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00538923 (tol =
0.002, component 1)
[OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC      NLL_rel
  <chr>                          <dbl>        <dbl>
1 bobyqa                        28077. 0           
2 nlminbwrap                    28077. 0.0000000235
3 nloptwrap.NLOPT_LN_NELDERMEAD 28077. 0.0000000510
4 optimx.L-BFGS-B               28077. 0.000000114 
5 nloptwrap.NLOPT_LN_BOBYQA     28077. 0.000000525 
6 nmkbw                         28077. 0.000000719 
7 Nelder_Mead                   28077. 0.0000136   
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`

[1] "_______ 2 npspos _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00654829 (tol =
0.002, component 1)
[OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : 
Warning in optimx.check(par, optcfg$ufn, optcfg$ugr, optcfg$uhess, lower, : Parameters or bounds appear to have different scalings.
  This can cause poor performance in optimization. 
  It is important for derivative free methods like BOBYQA, UOBYQA, NEWUOA.
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC     NLL_rel
  <chr>                          <dbl>       <dbl>
1 bobyqa                        22819. 0          
2 optimx.L-BFGS-B               22819. 0.000000129
3 nlminbwrap                    22819. 0.000000298
4 nloptwrap.NLOPT_LN_BOBYQA     22819. 0.000000769
5 nloptwrap.NLOPT_LN_NELDERMEAD 22819. 0.00000153 
6 nmkbw                         22819. 0.00000172 
7 Nelder_Mead                   22819. 0.0000170  
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 3 npsneg _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
Warning: Model failed to converge with 1 negative eigenvalue: -1.4e+00
bobyqa : [OK]
Nelder_Mead : 
Warning: Model failed to converge with 1 negative eigenvalue: -1.9e+00
[OK]
nlminbwrap : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00213547 (tol =
0.002, component 1)
[OK]
nmkbw : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00255455 (tol =
0.002, component 1)
[OK]
optimx.L-BFGS-B : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.0143562 (tol =
0.002, component 1)
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : 
Warning: Model failed to converge with 1 negative eigenvalue: -1.7e+00
[OK]
nloptwrap.NLOPT_LN_BOBYQA : 
Warning: Model failed to converge with 1 negative eigenvalue: -1.4e+00
[OK]
# A tibble: 7 × 3
  optimizer                        AIC      NLL_rel
  <chr>                          <dbl>        <dbl>
1 nloptwrap.NLOPT_LN_NELDERMEAD 14927. 0           
2 bobyqa                        14927. 0.0000000160
3 nlminbwrap                    14927. 0.00000444  
4 nmkbw                         14927. 0.0000128   
5 optimx.L-BFGS-B               14927. 0.0000816   
6 nloptwrap.NLOPT_LN_BOBYQA     14927. 0.0128      
7 Nelder_Mead                   14927. 0.0129      
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 4 pos_nps_vermis _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : 
Warning: Model failed to converge with 1 negative eigenvalue: -6.8e+00
[OK]
Nelder_Mead : [OK]
nlminbwrap : 
Warning: Model failed to converge with 1 negative eigenvalue: -6.8e+01
[OK]
nmkbw : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00207468 (tol =
0.002, component 1)
[OK]
optimx.L-BFGS-B : 
Warning: Model failed to converge with 1 negative eigenvalue: -5.3e+01
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                         AIC      NLL_rel
  <chr>                           <dbl>        <dbl>
1 Nelder_Mead                   -13100. 0           
2 nloptwrap.NLOPT_LN_BOBYQA     -13100. 0.0000000322
3 nloptwrap.NLOPT_LN_NELDERMEAD -13100. 0.0000000364
4 nmkbw                         -13100. 0.00000118  
5 bobyqa                        -13099. 0.187       
6 nlminbwrap                    -13099. 0.187       
7 optimx.L-BFGS-B               -13099. 0.187       
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 5 pos_nps_rIns _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.0666718 (tol =
0.002, component 1)
[OK]
nlminbwrap : [OK]
nmkbw : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.0023069 (tol =
0.002, component 1)
[OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                       AIC       NLL_rel
  <chr>                         <dbl>         <dbl>
1 bobyqa                        8729. 0            
2 optimx.L-BFGS-B               8729. 0.00000000739
3 nloptwrap.NLOPT_LN_NELDERMEAD 8729. 0.0000000821 
4 nloptwrap.NLOPT_LN_BOBYQA     8729. 0.000000116  
5 nlminbwrap                    8729. 0.000000158  
6 nmkbw                         8729. 0.00000164   
7 Nelder_Mead                   8729. 0.00119      
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 6 pos_nps_rV1 _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00414318 (tol =
0.002, component 1)
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC  NLL_rel
  <chr>                          <dbl>    <dbl>
1 bobyqa                        -2581. 0       
2 Nelder_Mead                   -2581. 1.86e-10
3 nlminbwrap                    -2581. 2.42e-10
4 nloptwrap.NLOPT_LN_NELDERMEAD -2581. 1.28e- 8
5 nloptwrap.NLOPT_LN_BOBYQA     -2581. 7.80e- 8
6 nmkbw                         -2581. 4.40e- 7
7 optimx.L-BFGS-B               -2581. 8.56e- 6
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 7 pos_nps_rThal _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : 
Warning: Model failed to converge with 1 negative eigenvalue: -3.5e+02
[OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC       NLL_rel
  <chr>                          <dbl>         <dbl>
1 bobyqa                        -8600. 0            
2 nlminbwrap                    -8600. 0.00000000108
3 nloptwrap.NLOPT_LN_BOBYQA     -8600. 0.0000000169 
4 Nelder_Mead                   -8600. 0.000000420  
5 optimx.L-BFGS-B               -8600. 0.000000441  
6 nloptwrap.NLOPT_LN_NELDERMEAD -8600. 0.000000567  
7 nmkbw                         -8600. 0.00000164   
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 8 pos_nps_lIns _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : 
Warning in optimx.check(par, optcfg$ufn, optcfg$ugr, optcfg$uhess, lower, : Parameters or bounds appear to have different scalings.
  This can cause poor performance in optimization. 
  It is important for derivative free methods like BOBYQA, UOBYQA, NEWUOA.
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC  NLL_rel
  <chr>                          <dbl>    <dbl>
1 bobyqa                        -3622. 0       
2 nlminbwrap                    -3622. 5.68e-10
3 Nelder_Mead                   -3622. 1.52e- 9
4 nloptwrap.NLOPT_LN_NELDERMEAD -3622. 4.18e- 8
5 nmkbw                         -3622. 3.26e- 7
6 nloptwrap.NLOPT_LN_BOBYQA     -3622. 3.42e- 7
7 optimx.L-BFGS-B               -3622. 1.10e- 6
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 9 pos_nps_rdpIns _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : 
Warning: Model failed to converge with 1 negative eigenvalue: -2.5e+01
[OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00498826 (tol =
0.002, component 1)
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC       NLL_rel
  <chr>                          <dbl>         <dbl>
1 nloptwrap.NLOPT_LN_NELDERMEAD -8983. 0            
2 Nelder_Mead                   -8983. 0.00000000850
3 nlminbwrap                    -8983. 0.0000000213 
4 nloptwrap.NLOPT_LN_BOBYQA     -8983. 0.0000000347 
5 nmkbw                         -8983. 0.000000859  
6 optimx.L-BFGS-B               -8983. 0.00000797   
7 bobyqa                        -8983. 0.0406       
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 10 pos_nps_rS2_Op _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC       NLL_rel
  <chr>                          <dbl>         <dbl>
1 bobyqa                        -2146. 0            
2 nlminbwrap                    -2146. 0.00000000522
3 Nelder_Mead                   -2146. 0.00000000859
4 nloptwrap.NLOPT_LN_NELDERMEAD -2146. 0.0000000540 
5 optimx.L-BFGS-B               -2146. 0.0000000741 
6 nloptwrap.NLOPT_LN_BOBYQA     -2146. 0.000000187  
7 nmkbw                         -2146. 0.000000397  
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 11 pos_nps_dACC _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                       AIC      NLL_rel
  <chr>                         <dbl>        <dbl>
1 bobyqa                        9557. 0           
2 nlminbwrap                    9557. 0.0000000136
3 nloptwrap.NLOPT_LN_NELDERMEAD 9557. 0.000000173 
4 nmkbw                         9557. 0.000000976 
5 nloptwrap.NLOPT_LN_BOBYQA     9557. 0.00000119  
6 optimx.L-BFGS-B               9557. 0.00000352  
7 Nelder_Mead                   9557. 0.0000124   
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 12 neg_nps_rLOC _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : 
Warning: Model failed to converge with 1 negative eigenvalue: -2.2e+01
[OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC  NLL_rel
  <chr>                          <dbl>    <dbl>
1 bobyqa                        -4664. 0       
2 Nelder_Mead                   -4664. 6.88e-10
3 nloptwrap.NLOPT_LN_NELDERMEAD -4664. 1.21e- 8
4 nlminbwrap                    -4664. 5.24e- 8
5 nloptwrap.NLOPT_LN_BOBYQA     -4664. 1.56e- 7
6 optimx.L-BFGS-B               -4664. 5.99e- 7
7 nmkbw                         -4664. 3.62e- 3
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 13 neg_nps_lLOC _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : 
Warning: Model failed to converge with 1 negative eigenvalue: -8.4e+02
[OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC       NLL_rel
  <chr>                          <dbl>         <dbl>
1 bobyqa                        -7689. 0            
2 Nelder_Mead                   -7689. 0.00000000595
3 nloptwrap.NLOPT_LN_NELDERMEAD -7689. 0.0000000247 
4 nloptwrap.NLOPT_LN_BOBYQA     -7689. 0.0000000560 
5 nmkbw                         -7689. 0.000000281  
6 optimx.L-BFGS-B               -7689. 0.000000419  
7 nlminbwrap                    -7685. 1.90         
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 14 neg_nps_rpLOC _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
Warning: Model failed to converge with 1 negative eigenvalue: -4.6e-01
bobyqa : 
Warning: Model failed to converge with 1 negative eigenvalue: -7.1e+01
[OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : 
Warning: Model failed to converge with 1 negative eigenvalue: -8.2e+01
[OK]
nloptwrap.NLOPT_LN_BOBYQA : 
Warning: Model failed to converge with 1 negative eigenvalue: -4.6e-01
[OK]
# A tibble: 7 × 3
  optimizer                        AIC  NLL_rel
  <chr>                          <dbl>    <dbl>
1 nlminbwrap                    -1573. 0       
2 Nelder_Mead                   -1573. 4.76e-10
3 nmkbw                         -1573. 1.38e- 6
4 optimx.L-BFGS-B               -1573. 1.48e- 6
5 bobyqa                        -1573. 1.50e- 2
6 nloptwrap.NLOPT_LN_NELDERMEAD -1573. 1.50e- 2
7 nloptwrap.NLOPT_LN_BOBYQA     -1573. 1.50e- 2
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 15 neg_nps_pgACC _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.0371812 (tol =
0.002, component 1)
[OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC  NLL_rel
  <chr>                          <dbl>    <dbl>
1 bobyqa                        -7616. 0       
2 nlminbwrap                    -7616. 3.08e-10
3 optimx.L-BFGS-B               -7616. 4.85e- 9
4 nloptwrap.NLOPT_LN_BOBYQA     -7616. 1.63e- 8
5 nloptwrap.NLOPT_LN_NELDERMEAD -7616. 4.84e- 8
6 nmkbw                         -7616. 6.52e- 7
7 Nelder_Mead                   -7616. 3.47e- 4
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 16 neg_nps_lSTS _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 1 negative
eigenvalues
Warning: Model failed to converge with 1 negative eigenvalue: -2.4e+00
bobyqa : 
Warning: Model failed to converge with 1 negative eigenvalue: -2.9e+00
[OK]
Nelder_Mead : [OK]
nlminbwrap : 
Warning: Model failed to converge with 1 negative eigenvalue: -3.0e+00
[OK]
nmkbw : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.0353021 (tol =
0.002, component 1)
[OK]
optimx.L-BFGS-B : 
Warning: Model failed to converge with 1 negative eigenvalue: -9.4e-01
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 1 negative
eigenvalues
Warning: Model failed to converge with 1 negative eigenvalue: -2.4e+00
[OK]
# A tibble: 7 × 3
  optimizer                        AIC  NLL_rel
  <chr>                          <dbl>    <dbl>
1 nmkbw                         -2550. 0       
2 nloptwrap.NLOPT_LN_BOBYQA     -2550. 0.000917
3 bobyqa                        -2550. 0.000990
4 optimx.L-BFGS-B               -2550. 0.000990
5 nlminbwrap                    -2550. 0.000990
6 Nelder_Mead                   -2550. 0.000990
7 nloptwrap.NLOPT_LN_NELDERMEAD -2550. 0.000990
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`

[1] "_______ 17 neg_nps_rIPL _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : [OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC       NLL_rel
  <chr>                          <dbl>         <dbl>
1 bobyqa                        -8648. 0            
2 optimx.L-BFGS-B               -8648. 0.00000000182
3 nlminbwrap                    -8648. 0.00000000747
4 Nelder_Mead                   -8648. 0.0000000228 
5 nloptwrap.NLOPT_LN_BOBYQA     -8648. 0.0000000534 
6 nloptwrap.NLOPT_LN_NELDERMEAD -8648. 0.0000000768 
7 nmkbw                         -8648. 0.000000263  
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

[1] "_______ 18 neg_nps_PCC _______"
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=92"
bobyqa : [OK]
Nelder_Mead : 
Warning: Model failed to converge with 1 negative eigenvalue: -1.6e-01
[OK]
nlminbwrap : [OK]
nmkbw : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00236044 (tol =
0.002, component 1)
[OK]
optimx.L-BFGS-B : [OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# A tibble: 7 × 3
  optimizer                        AIC       NLL_rel
  <chr>                          <dbl>         <dbl>
1 bobyqa                        -3803. 0            
2 nlminbwrap                    -3803. 0.00000000220
3 optimx.L-BFGS-B               -3803. 0.0000000487 
4 nloptwrap.NLOPT_LN_NELDERMEAD -3803. 0.0000000900 
5 nloptwrap.NLOPT_LN_BOBYQA     -3803. 0.000000199  
6 nmkbw                         -3803. 0.00000160   
7 Nelder_Mead                   -3803. 0.287        
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
Ignoring unknown parameters: `width`

5 SIIPS (df_filter.NA)

SIIPS linear analysis

# Fit a cue x stimulus-intensity multilevel model on single-trial SIIPS scores.
# Fixed effects: cue contrast (high vs. low), linear and quadratic stimulus
# contrasts, and the cue x stimulus interactions. Random effects: per-subject
# intercept plus cue, stimulus-linear, and cue:stimulus-linear slopes.
# NOTE(review): singular-fit checks are silenced (action = "ignore"), so
# degenerate random-effect estimates (see Corr = 1.00 in the output) are
# not flagged — confirm this suppression is intentional.
model.SIIPS <- lmer(SIIPS ~ CUE_high_gt_low*STIM_linear +
       CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low*STIM_linear|sub),
       data = df_filter.NA,
       control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
summary(model.SIIPS)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: SIIPS ~ CUE_high_gt_low * STIM_linear + CUE_high_gt_low * STIM_quadratic +      (CUE_high_gt_low * STIM_linear | sub)
   Data: df_filter.NA
Control: lmerControl(check.conv.singular = .makeCC(action = "ignore",      tol = 1e-04))

REML criterion at convergence: 74772.9

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-6.1802 -0.5584  0.0115  0.5581  5.0361 

Random effects:
 Groups   Name                        Variance Std.Dev. Corr          
 sub      (Intercept)                  72328.3 268.94                 
          CUE_high_gt_low               1312.0  36.22   0.23          
          STIM_linear                    318.8  17.85   0.97 0.00     
          CUE_high_gt_low:STIM_linear   2478.5  49.78   1.00 0.15 0.99
 Residual                             159652.9 399.57                 
Number of obs: 5029, groups:  sub, 92

Fixed effects:
                               Estimate Std. Error       df t value Pr(>|t|)    
(Intercept)                     578.047     28.700   90.785  20.141  < 2e-16 ***
CUE_high_gt_low                  35.484     11.948   86.226   2.970  0.00386 ** 
STIM_linear                     116.723     13.938 1478.815   8.374  < 2e-16 ***
STIM_quadratic                   33.905     12.068 4852.260   2.809  0.00498 ** 
CUE_high_gt_low:STIM_linear     -39.572     28.113 1010.404  -1.408  0.15955    
CUE_high_gt_low:STIM_quadratic    3.901     24.136 4853.001   0.162  0.87159    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
                     (Intr) CUE_h__ STIM_l STIM_q CUE_hgh_gt_lw:STIM_l
CUE_hgh_gt_           0.072                                           
STIM_linear           0.128  0.005                                    
STIM_qudrtc           0.000 -0.001  -0.002                            
CUE_hgh_gt_lw:STIM_l  0.182  0.010   0.026 -0.004                     
CUE_hgh_gt_lw:STIM_q  0.000 -0.001  -0.004  0.000 -0.002              
# Render an HTML regression table (estimates, CIs, p-values, random-effect
# variances) for the SIIPS model.
sjPlot::tab_model(model.SIIPS,
                  title = "Multilevel-modeling: \nlmer(SIIPS ~ CUE * STIM + (CUE + STIM | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(SIIPS ~ CUE * STIM + (CUE + STIM | sub), data = pain)
  SIIPS
Predictors Estimates CI p
(Intercept) 578.05 521.78 – 634.31 <0.001
CUE high gt low 35.48 12.06 – 58.91 0.003
STIM linear 116.72 89.40 – 144.05 <0.001
STIM quadratic 33.91 10.25 – 57.56 0.005
CUE high gt low × STIM
linear
-39.57 -94.69 – 15.54 0.159
CUE high gt low × STIM
quadratic
3.90 -43.42 – 51.22 0.872
Random Effects
σ2 159652.92
τ00 sub 72328.33
τ11 sub.CUE_high_gt_low 1312.04
τ11 sub.STIM_linear 318.80
τ11 sub.CUE_high_gt_low:STIM_linear 2478.54
ρ01 0.23
0.97
1.00
N sub 92
Observations 5029
Marginal R2 / Conditional R2 0.018 / NA
sink("/Users/h/Desktop/SIIPS_output.txt")  # Redirect console output to SIIPS_output.txt
summary(model.SIIPS)
equatiomatic::extract_eq(model.SIIPS)  # LaTeX equation for the fitted model

\[ \begin{aligned} \operatorname{SIIPS}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1j[i]}(\operatorname{CUE\_high\_gt\_low}) + \beta_{2j[i]}(\operatorname{STIM\_linear}) + \beta_{3}(\operatorname{STIM\_quadratic}) + \beta_{4j[i]}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_linear}) + \beta_{5}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_quadratic}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{1j} \\ &\beta_{2j} \\ &\beta_{4j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{1j}} \\ &\mu_{\beta_{2j}} \\ &\mu_{\beta_{4j}} \end{aligned} \end{array} \right) , \left( \begin{array}{cccc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{1j}} & \rho_{\alpha_{j}\beta_{2j}} & \rho_{\alpha_{j}\beta_{4j}} \\ \rho_{\beta_{1j}\alpha_{j}} & \sigma^2_{\beta_{1j}} & \rho_{\beta_{1j}\beta_{2j}} & \rho_{\beta_{1j}\beta_{4j}} \\ \rho_{\beta_{2j}\alpha_{j}} & \rho_{\beta_{2j}\beta_{1j}} & \sigma^2_{\beta_{2j}} & \rho_{\beta_{2j}\beta_{4j}} \\ \rho_{\beta_{4j}\alpha_{j}} & \rho_{\beta_{4j}\beta_{1j}} & \rho_{\beta_{4j}\beta_{2j}} & \sigma^2_{\beta_{4j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

sink(NULL)  #
# ---- Plot setup: SIIPS line plots (labels, summary stats, parameters) ----
dv <- "SIIPS"
taskname <- "pain"
subject <- "sub"

# Plot 1: recode cue/stimulus labels via lookup vectors and order them ________
cue_labels <- c(high_cue = "high cue", low_cue = "low cue")
stim_labels <- c(high_stim = "High", med_stim = "Med", low_stim = "Low")
df_filter$cue_name <- unname(cue_labels[as.character(df_filter$cue)])
df_filter$stim_name <- unname(stim_labels[as.character(df_filter$stimulusintensity)])

df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: subject-wise means, then within-subject group summary _______________
SIIPS_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
SIIPS_groupwise <- summarySEwithin(
  data = SIIPS_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
SIIPS_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")  # NOTE(review): shadows ggplot2::ggtitle
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "SIIPS"
ylim <- c(-10, 60)
dv_keyword <- "siips"
# Green/orange palette for expectation DVs; cue palette otherwise.
color <- if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  c("#1B9E77", "#D95F02")
} else {
  c(lowcuehex, highcuehex)
}
plot_savefname <- file.path(
  analysis_dir,
  paste0("raincloud_task-", taskname, "_rating-", dv_keyword, "_",
         as.character(Sys.Date()), ".png")
)
# Line plot of low-cue trials only: SIIPS as a function of stimulus intensity.
SIIPSsubset <- SIIPS_groupwise[SIIPS_groupwise$cue_ordered == "low cue", ]
g <- plot_lineplot_twofactor(SIIPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        # FIX: title previously said "NPS" although this figure plots SIIPS
                        color = c("low cue" = lowcuehex), ggtitle = paste0("SIIPS N =", length(unique(SIIPS_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "SIIPS")
g <- g + theme(aspect.ratio = 1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  geom_point(size = 2) + # Adjust point size
  ylim(465, 650) # keep y-range comparable across SIIPS figures
ggsave(file.path(analysis_dir, paste0("SIIPS_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot = g, w = 4, h = 4, dpi = 300)

# Line plot with both cue levels (cueR helper); subject N shown in the title.
k <- cueR::plot_lineplot_twofactor_subset(SIIPS_groupwise, taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("SIIPS N =", length(unique(SIIPS_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "SIIPS")
Warning in geom_pointrange(aes(ymin = (.data[[mean]] - .data[[error]]), : Ignoring unknown parameters: `width`
# Enlarge fonts, square the aspect ratio, and save the two-cue plot as SVG.
# y-limits match the low-cue-only figure above so the panels are comparable.
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
 ylim(465, 650)
k

ggsave(file.path(analysis_dir, paste0("SIIPS_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)

5-3 SIIPS and PE (PEmerge)

# Does SIIPS track trial-wise prediction error (PE_mdl2) over and above cue
# and stimulus intensity? Random slopes for cue and stimulus-linear only; the
# PE slope is fixed. Singular-fit checks are silenced as in the models above.
SIIPSPE <- lmer(SIIPS ~ CUE_high_gt_low + STIM_linear + PE_mdl2 + (CUE_high_gt_low + STIM_linear   |sub),
     data = PEmerge,
     control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
summary(SIIPSPE)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: SIIPS ~ CUE_high_gt_low + STIM_linear + PE_mdl2 + (CUE_high_gt_low +      STIM_linear | sub)
   Data: PEmerge
Control: lmerControl(check.conv.singular = .makeCC(action = "ignore",      tol = 1e-04))

REML criterion at convergence: 73434.7

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-6.1080 -0.5520  0.0123  0.5535  5.0921 

Random effects:
 Groups   Name            Variance Std.Dev. Corr     
 sub      (Intercept)      71713.1 267.79            
          CUE_high_gt_low   1739.6  41.71   0.29     
          STIM_linear        258.9  16.09   0.96 0.00
 Residual                 159922.1 399.90            
Number of obs: 4937, groups:  sub, 88

Fixed effects:
                 Estimate Std. Error        df t value Pr(>|t|)    
(Intercept)      583.7659    29.2568   87.5302  19.953  < 2e-16 ***
CUE_high_gt_low   59.5861    17.4640  216.3324   3.412  0.00077 ***
STIM_linear       93.0478    18.2732 2012.3596   5.092 3.87e-07 ***
PE_mdl2            0.8563     0.4149 1181.2445   2.064  0.03926 *  
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation of Fixed Effects:
            (Intr) CUE___ STIM_l
CUE_hgh_gt_  0.028              
STIM_linear  0.129 -0.452       
PE_mdl2     -0.064  0.710 -0.639
vif(SIIPSPE)
CUE_high_gt_low     STIM_linear         PE_mdl2 
       2.019107        1.689254        2.715155 

Session wise filter

# Session-wise filter: drop subjects who have fewer than 4 trials in any
# observed cue x stimulus x session cell (too few trials to estimate a
# stable cell mean).
# NOTE(review): a subject with a cell that is entirely absent (0 trials)
# produces no row in the grouped summary and is therefore NOT excluded here
# — confirm whether fully-missing cells should also disqualify a subject.
subjects_with_inadequate_data <- data %>%
  group_by(sub, CUE_high_gt_low, STIM_linear, SES_linear) %>%
  dplyr::summarise(count = n(), .groups = 'drop') %>%
  filter(count < 4) %>%
  distinct(sub) %>%
  pull(sub)
df_filter_ses <- data %>%
  filter(!(sub %in% subjects_with_inadequate_data))

# FIX: message previously said "less than 3 trials" although the criterion
# above is count < 4 (i.e., fewer than 4 trials per cell).
print(sprintf("after filtering out subjects that have fewer than 4 trials in a cell, we have N=%d -> N=%d",length(unique(data$sub)), length(unique(df_filter_ses$sub)) ))
[1] "after filtering out subjects that have less than 3 trials in cell, we have N=97 -> N=44"

4 Sessionwise Beh

# Session-wise behavioral model: cue x stimulus (linear/quadratic) x session
# (linear/quadratic) on imputed outcome ratings. Random effects: per-subject
# intercept plus cue, stimulus-linear, and session-linear slopes.
# Singular-fit checks silenced, consistent with the other models in this file.
model.behses <- lmer(outcomerating_impute ~
                          CUE_high_gt_low*STIM_linear*SES_linear +
                          CUE_high_gt_low*STIM_quadratic*SES_linear +
                          CUE_high_gt_low*STIM_linear*SES_quadratic +
                          CUE_high_gt_low*STIM_quadratic*SES_quadratic +
                          (CUE_high_gt_low + STIM_linear + SES_linear |
         sub), data = df_filter_ses,
         control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
         # Alternative optimizer kept for reference:
         # control = lmerControl(optimizer = "nmkbw"
                               # ))
summary(model.behses)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: outcomerating_impute ~ CUE_high_gt_low * STIM_linear * SES_linear +  
    CUE_high_gt_low * STIM_quadratic * SES_linear + CUE_high_gt_low *  
    STIM_linear * SES_quadratic + CUE_high_gt_low * STIM_quadratic *      SES_quadratic + (CUE_high_gt_low + STIM_linear + SES_linear |  
    sub)
   Data: df_filter_ses
Control: lmerControl(check.conv.singular = .makeCC(action = "ignore",      tol = 1e-04))

REML criterion at convergence: 20555.8

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.8057 -0.5810  0.0106  0.5651  4.2667 

Random effects:
 Groups   Name            Variance Std.Dev. Corr             
 sub      (Intercept)      603.02  24.556                    
          CUE_high_gt_low   67.63   8.224   -0.30            
          STIM_linear       82.33   9.074    0.58 -0.22      
          SES_linear      1663.11  40.781   -0.14  0.07 -0.09
 Residual                  292.13  17.092                    
Number of obs: 2372, groups:  sub, 44

Fixed effects:
                                              Estimate Std. Error        df t value Pr(>|t|)    
(Intercept)                                    57.1311     3.8879   40.4750  14.695  < 2e-16 ***
CUE_high_gt_low                                 6.6941     1.4487   38.9592   4.621 4.12e-05 ***
STIM_linear                                    28.8428     1.6423   41.5695  17.562  < 2e-16 ***
SES_linear                                    -12.0518     6.7229   36.9439  -1.793   0.0812 .  
STIM_quadratic                                  1.4593     0.7521 2189.6397   1.940   0.0525 .  
SES_quadratic                                  -1.8859     0.9102 2247.4062  -2.072   0.0384 *  
CUE_high_gt_low:STIM_linear                    -1.7178     1.7200 2189.6986  -0.999   0.3180    
CUE_high_gt_low:SES_linear                     -1.2866     1.8565 1774.2405  -0.693   0.4884    
STIM_linear:SES_linear                          1.6410     2.2438 1696.3550   0.731   0.4647    
CUE_high_gt_low:STIM_quadratic                 -3.0132     1.5043 2189.6209  -2.003   0.0453 *  
SES_linear:STIM_quadratic                      -2.6821     1.8427 2189.6425  -1.456   0.1457    
CUE_high_gt_low:SES_quadratic                   0.3361     1.5593 2183.1024   0.216   0.8294    
STIM_linear:SES_quadratic                      -3.3002     1.8979 2156.6074  -1.739   0.0822 .  
STIM_quadratic:SES_quadratic                    1.9730     1.6113 2189.6373   1.224   0.2209    
CUE_high_gt_low:STIM_linear:SES_linear          5.2783     4.2117 2189.7226   1.253   0.2103    
CUE_high_gt_low:SES_linear:STIM_quadratic       5.3577     3.6854 2189.6425   1.454   0.1462    
CUE_high_gt_low:STIM_linear:SES_quadratic       2.2848     3.6868 2189.6742   0.620   0.5355    
CUE_high_gt_low:STIM_quadratic:SES_quadratic   -1.4627     3.2226 2189.5944  -0.454   0.6500    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation matrix not shown by default, as p = 18 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
# HTML regression table for the session-wise behavioral model.
sjPlot::tab_model(model.behses,
                  title = "Multilevel-modeling: \nlmer(BEH ~ CUE * STIM *SES + (CUE + STIM + SES | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(BEH ~ CUE * STIM *SES + (CUE + STIM + SES | sub), data = pain)
  outcomerating_impute
Predictors Estimates CI p
(Intercept) 57.13 49.51 – 64.76 <0.001
CUE high gt low 6.69 3.85 – 9.54 <0.001
STIM linear 28.84 25.62 – 32.06 <0.001
SES linear -12.05 -25.24 – 1.13 0.073
STIM quadratic 1.46 -0.02 – 2.93 0.052
SES quadratic -1.89 -3.67 – -0.10 0.038
CUE high gt low × STIM
linear
-1.72 -5.09 – 1.66 0.318
CUE high gt low × SES
linear
-1.29 -4.93 – 2.35 0.488
STIM linear × SES linear 1.64 -2.76 – 6.04 0.465
CUE high gt low × STIM
quadratic
-3.01 -5.96 – -0.06 0.045
SES linear × STIM
quadratic
-2.68 -6.30 – 0.93 0.146
CUE high gt low × SES
quadratic
0.34 -2.72 – 3.39 0.829
STIM linear × SES
quadratic
-3.30 -7.02 – 0.42 0.082
STIM quadratic × SES
quadratic
1.97 -1.19 – 5.13 0.221
(CUE high gt low × STIM
linear) × SES linear
5.28 -2.98 – 13.54 0.210
(CUE high gt low × SES
linear) × STIM quadratic
5.36 -1.87 – 12.58 0.146
(CUE high gt low × STIM
linear) × SES quadratic
2.28 -4.94 – 9.51 0.535
(CUE high gt low × STIM
quadratic) × SES
quadratic
-1.46 -7.78 – 4.86 0.650
Random Effects
σ2 292.13
τ00 sub 603.02
τ11 sub.CUE_high_gt_low 67.63
τ11 sub.STIM_linear 82.33
τ11 sub.SES_linear 1663.11
ρ01 -0.30
0.58
-0.14
ICC 0.76
N sub 44
Observations 2372
Marginal R2 / Conditional R2 0.129 / 0.788
equatiomatic::extract_eq(model.behses)

\[ \begin{aligned} \operatorname{outcomerating\_impute}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1j[i]}(\operatorname{CUE\_high\_gt\_low}) + \beta_{2j[i]}(\operatorname{STIM\_linear}) + \beta_{3j[i]}(\operatorname{SES\_linear}) + \beta_{4}(\operatorname{STIM\_quadratic}) + \beta_{5}(\operatorname{SES\_quadratic}) + \beta_{6}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_linear}) + \beta_{7}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear}) + \beta_{8}(\operatorname{SES\_linear} \times \operatorname{STIM\_linear}) + \beta_{9}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_quadratic}) + \beta_{10}(\operatorname{SES\_linear} \times \operatorname{STIM\_quadratic}) + \beta_{11}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic}) + \beta_{12}(\operatorname{SES\_quadratic} \times \operatorname{STIM\_linear}) + \beta_{13}(\operatorname{SES\_quadratic} \times \operatorname{STIM\_quadratic}) + \beta_{14}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear} \times \operatorname{STIM\_linear}) + \beta_{15}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear} \times \operatorname{STIM\_quadratic}) + \beta_{16}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic} \times \operatorname{STIM\_linear}) + \beta_{17}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic} \times \operatorname{STIM\_quadratic}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{1j} \\ &\beta_{2j} \\ &\beta_{3j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{1j}} \\ &\mu_{\beta_{2j}} \\ &\mu_{\beta_{3j}} \end{aligned} \end{array} \right) , \left( \begin{array}{cccc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{1j}} & \rho_{\alpha_{j}\beta_{2j}} & \rho_{\alpha_{j}\beta_{3j}} \\ \rho_{\beta_{1j}\alpha_{j}} & \sigma^2_{\beta_{1j}} & 
\rho_{\beta_{1j}\beta_{2j}} & \rho_{\beta_{1j}\beta_{3j}} \\ \rho_{\beta_{2j}\alpha_{j}} & \rho_{\beta_{2j}\beta_{1j}} & \sigma^2_{\beta_{2j}} & \rho_{\beta_{2j}\beta_{3j}} \\ \rho_{\beta_{3j}\alpha_{j}} & \rho_{\beta_{3j}\beta_{1j}} & \rho_{\beta_{3j}\beta_{2j}} & \sigma^2_{\beta_{3j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

# Diagnose the convergence warnings: refit model.behses with every available
# optimizer and compare their log-likelihoods below ____________________________
abeh <- lme4::allFit(model.behses)
bobyqa : [OK]
Nelder_Mead : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 1.13484 (tol =
0.002, component 1)
[OK]
nlminbwrap : [OK]
nmkbw : [OK]
optimx.L-BFGS-B : 
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00784696 (tol =
0.002, component 1)
[OK]
nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
nloptwrap.NLOPT_LN_BOBYQA : [OK]
# Summarize the allFit refits: AIC and negative log-likelihood relative to
# the best optimizer, sorted best-first.
opt.summary <- abeh |>
  glance() |>
  select(optimizer, AIC, NLL_rel) |>
  arrange(NLL_rel)
print(opt.summary)
# A tibble: 7 × 3
  optimizer                        AIC      NLL_rel
  <chr>                          <dbl>        <dbl>
1 bobyqa                        20614. 0           
2 nlminbwrap                    20614. 0.0000000733
3 nloptwrap.NLOPT_LN_NELDERMEAD 20614. 0.000000199 
4 nmkbw                         20614. 0.000000468 
5 nloptwrap.NLOPT_LN_BOBYQA     20614. 0.00000132  
6 optimx.L-BFGS-B               20614. 0.0000431   
7 Nelder_Mead                   20615. 0.687       

5 Sessionwise NPS

# Sessionwise NPS model: full cue x stimulus x session crossing (linear and
# quadratic contrasts) with by-subject random slopes for the linear terms only.
# NOTE(review): optCtrl mixes control-argument names from different optimizers
# (maxfun/maxeval/xtol_abs/ftol_abs are not all recognized by optimx's "nmkb"
# method) -- this is what triggers the "number of items to replace" warning
# printed below; confirm which options nmkb actually honors.
model.NPSses <- lmer(NPS ~
                          CUE_high_gt_low*STIM_linear*SES_linear +
                          CUE_high_gt_low*STIM_quadratic*SES_linear +
                          CUE_high_gt_low*STIM_linear*SES_quadratic +
                          CUE_high_gt_low*STIM_quadratic*SES_quadratic +
                          (CUE_high_gt_low + STIM_linear +SES_linear |
         sub), data = df_filter_ses,
    control = lmerControl(
      optimizer = "optimx",
      optCtrl = list(
        method = "nmkb",
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e7,
        xtol_abs = 1e-9,
        ftol_abs = 1e-9
      )
    )
  )
Warning in ctrl[namc] <- control: number of items to replace is not a multiple of replacement length
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 1 negative
eigenvalues
Warning: Model failed to converge with 1 negative eigenvalue: -8.6e-03
# Fixed/random-effect estimates (fit did not fully converge -- see warnings above).
summary(model.NPSses)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: NPS ~ CUE_high_gt_low * STIM_linear * SES_linear + CUE_high_gt_low *  
    STIM_quadratic * SES_linear + CUE_high_gt_low * STIM_linear *      SES_quadratic + CUE_high_gt_low * STIM_quadratic * SES_quadratic +  
    (CUE_high_gt_low + STIM_linear + SES_linear | sub)
   Data: df_filter_ses
Control: lmerControl(optimizer = "optimx", optCtrl = list(method = "nmkb",  
    maxit = 1e+09, maxfun = 1e+09, maxeval = 1e+07, xtol_abs = 1e-09,      ftol_abs = 1e-09))

REML criterion at convergence: 12378.5

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.9321 -0.5543 -0.0081  0.5686  5.3204 

Random effects:
 Groups   Name            Variance  Std.Dev. Corr             
 sub      (Intercept)      2.300694 1.51680                   
          CUE_high_gt_low  0.139566 0.37359  -0.57            
          STIM_linear      0.004609 0.06789   0.34  0.58      
          SES_linear       4.796639 2.19012  -0.16 -0.68 -0.93
 Residual                 10.020358 3.16549                   
Number of obs: 2376, groups:  sub, 44

Fixed effects:
                                               Estimate Std. Error         df t value Pr(>|t|)    
(Intercept)                                     4.73676    0.24820   43.52579  19.085   <2e-16 ***
CUE_high_gt_low                                -0.03354    0.14242   43.21367  -0.236   0.8149    
STIM_linear                                     1.33863    0.15947 1305.94556   8.394   <2e-16 ***
SES_linear                                      0.11153    0.40678   32.94520   0.274   0.7857    
STIM_quadratic                                  0.38680    0.13919 2237.43650   2.779   0.0055 ** 
SES_quadratic                                   0.27111    0.15777 1639.77499   1.718   0.0859 .  
CUE_high_gt_low:STIM_linear                    -0.02950    0.31824 2237.43650  -0.093   0.9262    
CUE_high_gt_low:SES_linear                     -0.08578    0.32062 1489.12394  -0.268   0.7891    
STIM_linear:SES_linear                          0.32546    0.38989 2239.84736   0.835   0.4039    
CUE_high_gt_low:STIM_quadratic                 -0.05087    0.27839 2237.43650  -0.183   0.8550    
SES_linear:STIM_quadratic                       0.05290    0.34101 2237.43650   0.155   0.8767    
CUE_high_gt_low:SES_quadratic                   0.44446    0.27919 2111.56958   1.592   0.1115    
STIM_linear:SES_quadratic                      -0.12938    0.34093 2241.90566  -0.380   0.7044    
STIM_quadratic:SES_quadratic                    0.43692    0.29821 2237.43650   1.465   0.1430    
CUE_high_gt_low:STIM_linear:SES_linear          0.21128    0.77965 2237.43650   0.271   0.7864    
CUE_high_gt_low:SES_linear:STIM_quadratic       0.13414    0.68202 2237.43650   0.197   0.8441    
CUE_high_gt_low:STIM_linear:SES_quadratic       0.34457    0.68181 2237.43650   0.505   0.6133    
CUE_high_gt_low:STIM_quadratic:SES_quadratic    0.22317    0.59643 2237.43650   0.374   0.7083    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation matrix not shown by default, as p = 18 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
optimizer (optimx) convergence code: 0 (OK)
unable to evaluate scaled gradient
Model failed to converge: degenerate  Hessian with 1 negative eigenvalues
number of items to replace is not a multiple of replacement length
# HTML coefficient table for the NPS session model.
sjPlot::tab_model(model.NPSses,
                  title = "Multilevel-modeling: \nlmer(NPS ~ CUE * STIM * SES+ (CUE + STIM + SES| sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(NPS ~ CUE * STIM * SES+ (CUE + STIM + SES| sub), data = pain)
  NPS
Predictors Estimates CI p
(Intercept) 4.74 4.25 – 5.22 <0.001
CUE high gt low -0.03 -0.31 – 0.25 0.814
STIM linear 1.34 1.03 – 1.65 <0.001
SES linear 0.11 -0.69 – 0.91 0.784
STIM quadratic 0.39 0.11 – 0.66 0.005
SES quadratic 0.27 -0.04 – 0.58 0.086
CUE high gt low × STIM
linear
-0.03 -0.65 – 0.59 0.926
CUE high gt low × SES
linear
-0.09 -0.71 – 0.54 0.789
STIM linear × SES linear 0.33 -0.44 – 1.09 0.404
CUE high gt low × STIM
quadratic
-0.05 -0.60 – 0.50 0.855
SES linear × STIM
quadratic
0.05 -0.62 – 0.72 0.877
CUE high gt low × SES
quadratic
0.44 -0.10 – 0.99 0.112
STIM linear × SES
quadratic
-0.13 -0.80 – 0.54 0.704
STIM quadratic × SES
quadratic
0.44 -0.15 – 1.02 0.143
(CUE high gt low × STIM
linear) × SES linear
0.21 -1.32 – 1.74 0.786
(CUE high gt low × SES
linear) × STIM quadratic
0.13 -1.20 – 1.47 0.844
(CUE high gt low × STIM
linear) × SES quadratic
0.34 -0.99 – 1.68 0.613
(CUE high gt low × STIM
quadratic) × SES
quadratic
0.22 -0.95 – 1.39 0.708
Random Effects
σ2 10.02
τ00 sub 2.30
τ11 sub.CUE_high_gt_low 0.14
τ11 sub.STIM_linear 0.00
τ11 sub.SES_linear 4.80
ρ01 -0.57
0.34
-0.16
ICC 0.24
N sub 44
Observations 2376
Marginal R2 / Conditional R2 0.028 / 0.259
# Render the fitted model as a LaTeX equation (output shown below).
equatiomatic::extract_eq(model.NPSses)

\[ \begin{aligned} \operatorname{NPS}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1j[i]}(\operatorname{CUE\_high\_gt\_low}) + \beta_{2j[i]}(\operatorname{STIM\_linear}) + \beta_{3j[i]}(\operatorname{SES\_linear}) + \beta_{4}(\operatorname{STIM\_quadratic}) + \beta_{5}(\operatorname{SES\_quadratic}) + \beta_{6}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_linear}) + \beta_{7}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear}) + \beta_{8}(\operatorname{SES\_linear} \times \operatorname{STIM\_linear}) + \beta_{9}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_quadratic}) + \beta_{10}(\operatorname{SES\_linear} \times \operatorname{STIM\_quadratic}) + \beta_{11}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic}) + \beta_{12}(\operatorname{SES\_quadratic} \times \operatorname{STIM\_linear}) + \beta_{13}(\operatorname{SES\_quadratic} \times \operatorname{STIM\_quadratic}) + \beta_{14}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear} \times \operatorname{STIM\_linear}) + \beta_{15}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear} \times \operatorname{STIM\_quadratic}) + \beta_{16}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic} \times \operatorname{STIM\_linear}) + \beta_{17}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic} \times \operatorname{STIM\_quadratic}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{1j} \\ &\beta_{2j} \\ &\beta_{3j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{1j}} \\ &\mu_{\beta_{2j}} \\ &\mu_{\beta_{3j}} \end{aligned} \end{array} \right) , \left( \begin{array}{cccc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{1j}} & \rho_{\alpha_{j}\beta_{2j}} & \rho_{\alpha_{j}\beta_{3j}} \\ \rho_{\beta_{1j}\alpha_{j}} & \sigma^2_{\beta_{1j}} & \rho_{\beta_{1j}\beta_{2j}} & 
\rho_{\beta_{1j}\beta_{3j}} \\ \rho_{\beta_{2j}\alpha_{j}} & \rho_{\beta_{2j}\beta_{1j}} & \sigma^2_{\beta_{2j}} & \rho_{\beta_{2j}\beta_{3j}} \\ \rho_{\beta_{3j}\alpha_{j}} & \rho_{\beta_{3j}\beta_{1j}} & \rho_{\beta_{3j}\beta_{2j}} & \sigma^2_{\beta_{3j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

# Optimizer sweep for the maximal-random-slope NPS model: try each optimx
# method in turn and stop at the first fit that raises no lme4 convergence
# messages. Leaves `model_flex` bound to the last model attempted.
# NOTE(review): several optCtrl entries (maxfun, maxeval, xtol_abs, ftol_abs)
# are not recognized by every optimx method and produce the warnings seen
# below; they are kept for the methods that do accept them.
optimx_options <- c("L-BFGS-B", "nlminb", "nlm", "bobyqa", "nmkb", "hjkb")
for (opt_method in optimx_options) {
  print(paste0("Testing optimx: ", opt_method))
  model_flex <- lmer(
    NPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      (CUE_high_gt_low + SES_linear + SES_quadratic + STIM_linear + STIM_quadratic |
         sub),
    data = df_filter_ses,
    control = lmerControl(
      optimizer = "optimx",
      optCtrl = list(
        method = opt_method,
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e7,
        xtol_abs = 1e-9,
        ftol_abs = 1e-9
      )
    )
  )

  # An empty message slot means lme4 flagged no convergence problems.
  if (is.null(model_flex@optinfo$conv$lme4$messages)) {
    print(paste0("One of the optimx options, ", opt_method, ", worked!"))
    print(model_flex)
    break
  }
}
[1] "Testing optimx: L-BFGS-B"
Warning in optim(par = par, fn = ufn, gr = ugr, lower = lower, upper = upper, : unknown names in control: maxfun, maxeval, xtol_abs,
ftol_abs
boundary (singular) fit: see help('isSingular')
[1] "Testing optimx: nlminb"
Warning in nlminb(start = par, objective = ufn, gradient = ugr, lower = lower, : unrecognized control elements named 'maxfun', 'maxeval',
'xtol_abs', 'ftol_abs' ignored
boundary (singular) fit: see help('isSingular')
[1] "Testing optimx: nlm"
Warning in nlminb(start = par, objective = ufn, gradient = ugr, lower = lower, : unrecognized control elements named 'maxfun', 'maxeval',
'xtol_abs', 'ftol_abs' ignored
boundary (singular) fit: see help('isSingular')
[1] "Testing optimx: bobyqa"
Warning in (function (npt = min(n + 2L, 2L * n), rhobeg = NA, rhoend = NA, : unused control arguments ignored
boundary (singular) fit: see help('isSingular')
[1] "Testing optimx: nmkb"
Warning in ctrl[namc] <- control: number of items to replace is not a multiple of replacement length
boundary (singular) fit: see help('isSingular')
[1] "Testing optimx: hjkb"
Warning in cntrl[nmsCo] <- control: number of items to replace is not a multiple of replacement length
step    nofc    fmin    xpar
1    44      12618.31    1 ...
2    429     12368   0.5 ...
3    543     12367.21    0.5 ...
4    657     12360.07    0.5 ...
5    848     12358.46    0.5 ...
6    1196    12355.68    0.46875 ...
7    1549    12355.43    0.453125 ...
8    1749    12355.36    0.4609375 ...
9    1950    12355.34    0.4570312 ...
10   2151    12355.33    0.4570312 ...
11   3104    12355.33    0.4580078 ...
12   3456    12355.33    0.4580078 ...
13   3730    12355.33    0.4577637 ...
14   4004    12355.33    0.4577637 ...
15   4122    12355.33    0.4577637 ...
16   4240    12355.33    0.4577637 ...
17   4436    12355.33    0.4577484 ...
18   4632    12355.33    0.4577484 ...
19   4906    12355.33    0.4577484 ...
boundary (singular) fit: see help('isSingular')
Warning: Model failed to converge with 2 negative eigenvalues: -6.1e-02 -9.7e+00

6 Sessionwise SIIPS

Random slopes of the cue effect were collinear with the random intercepts (correlation of 1), so estimating both the individual-difference baseline and the individual differences in cue effects was impossible.

# Sanity check: cell counts of the cue x stimulus x session design
# (output below shows a balanced design within each session level).
with(df_filter_ses, table(CUE_high_gt_low, STIM_linear, SES_linear))
, , SES_linear = -0.5

               STIM_linear
CUE_high_gt_low -0.5   0 0.5
           -0.5  128 128 128
           0.5   128 128 128

, , SES_linear = 0

               STIM_linear
CUE_high_gt_low -0.5   0 0.5
           -0.5  132 132 132
           0.5   132 132 132

, , SES_linear = 0.5

               STIM_linear
CUE_high_gt_low -0.5   0 0.5
           -0.5  136 136 136
           0.5   136 136 136
# glmmTMB is loaded here for the alternative fit attempted further below.
library(glmmTMB)
# Sessionwise SIIPS model: same fixed-effect structure as the NPS model, but
# with random slopes for STIM_quadratic (not STIM_linear) plus SES_linear,
# fitted with the nmkbw optimizer. Still fails the gradient tolerance check
# (see warning below), though only marginally (max|grad| ~ 0.005).
model.SIIPSses <- lmer(SIIPS ~
                          CUE_high_gt_low*STIM_linear*SES_linear +
                          CUE_high_gt_low*STIM_quadratic*SES_linear +
                          CUE_high_gt_low*STIM_linear*SES_quadratic +
                          CUE_high_gt_low*STIM_quadratic*SES_quadratic +
                          (1+ CUE_high_gt_low + STIM_quadratic + SES_linear|sub),
         data = df_filter_ses,
                   control = lmerControl(optimizer = "nmkbw"
                                ))
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00464962 (tol =
0.002, component 1)
# Fixed/random-effect estimates for the SIIPS session model.
summary(model.SIIPSses)
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: SIIPS ~ CUE_high_gt_low * STIM_linear * SES_linear + CUE_high_gt_low *  
    STIM_quadratic * SES_linear + CUE_high_gt_low * STIM_linear *      SES_quadratic + CUE_high_gt_low * STIM_quadratic * SES_quadratic +  
    (1 + CUE_high_gt_low + STIM_quadratic + SES_linear | sub)
   Data: df_filter_ses
Control: lmerControl(optimizer = "nmkbw")

REML criterion at convergence: 34296.1

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0754 -0.5766  0.0244  0.5720  5.2361 

Random effects:
 Groups   Name            Variance Std.Dev. Corr             
 sub      (Intercept)      60450   245.87                    
          CUE_high_gt_low   1888    43.45    0.13            
          STIM_quadratic    7729    87.92    0.39  0.08      
          SES_linear       33645   183.43   -0.24  0.23 -0.94
 Residual                 107187   327.39                    
Number of obs: 2376, groups:  sub, 44

Fixed effects:
                                             Estimate Std. Error       df t value Pr(>|t|)    
(Intercept)                                   545.354     38.223   42.294  14.268  < 2e-16 ***
CUE_high_gt_low                                14.105     15.111   35.145   0.933   0.3570    
STIM_linear                                   121.136     16.457 2188.708   7.361 2.58e-13 ***
SES_linear                                      8.258     36.270   25.450   0.228   0.8217    
STIM_quadratic                                 20.103     19.854   37.825   1.013   0.3177    
SES_quadratic                                   8.918     16.001 1364.870   0.557   0.5774    
CUE_high_gt_low:STIM_linear                   -19.787     32.915 2188.708  -0.601   0.5478    
CUE_high_gt_low:SES_linear                    -18.557     33.374 1322.426  -0.556   0.5783    
STIM_linear:SES_linear                         30.159     40.318 2188.708   0.748   0.4545    
CUE_high_gt_low:STIM_quadratic                 -5.768     28.793 2188.708  -0.200   0.8412    
SES_linear:STIM_quadratic                     -35.822     36.313 1439.624  -0.986   0.3241    
CUE_high_gt_low:SES_quadratic                  30.691     28.975 2025.616   1.059   0.2896    
STIM_linear:SES_quadratic                      18.933     35.258 2188.708   0.537   0.5913    
STIM_quadratic:SES_quadratic                   60.722     31.247 2071.576   1.943   0.0521 .  
CUE_high_gt_low:STIM_linear:SES_linear         43.819     80.636 2188.708   0.543   0.5869    
CUE_high_gt_low:SES_linear:STIM_quadratic      66.566     70.538 2188.708   0.944   0.3454    
CUE_high_gt_low:STIM_linear:SES_quadratic    -130.658     70.517 2188.708  -1.853   0.0640 .  
CUE_high_gt_low:STIM_quadratic:SES_quadratic   33.497     61.686 2188.708   0.543   0.5872    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Correlation matrix not shown by default, as p = 18 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
optimizer (nmkbw) convergence code: 0 (OK)
Model failed to converge with max|grad| = 0.00464962 (tol = 0.002, component 1)
# HTML coefficient table for the SIIPS session model.
sjPlot::tab_model(model.SIIPSses,
                  title = "Multilevel-modeling: \nlmer(SIIPS ~ CUE * STIM * SES+ (CUE + STIM *SES | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
Multilevel-modeling: lmer(SIIPS ~ CUE * STIM * SES+ (CUE + STIM *SES | sub), data = pain)
  SIIPS
Predictors Estimates CI p
(Intercept) 545.35 470.40 – 620.31 <0.001
CUE high gt low 14.10 -15.53 – 43.74 0.351
STIM linear 121.14 88.86 – 153.41 <0.001
SES linear 8.26 -62.87 – 79.38 0.820
STIM quadratic 20.10 -18.83 – 59.04 0.311
SES quadratic 8.92 -22.46 – 40.30 0.577
CUE high gt low × STIM
linear
-19.79 -84.33 – 44.76 0.548
CUE high gt low × SES
linear
-18.56 -84.00 – 46.89 0.578
STIM linear × SES linear 30.16 -48.90 – 109.22 0.455
CUE high gt low × STIM
quadratic
-5.77 -62.23 – 50.69 0.841
SES linear × STIM
quadratic
-35.82 -107.03 – 35.39 0.324
CUE high gt low × SES
quadratic
30.69 -26.13 – 87.51 0.290
STIM linear × SES
quadratic
18.93 -50.21 – 88.07 0.591
STIM quadratic × SES
quadratic
60.72 -0.55 – 122.00 0.052
(CUE high gt low × STIM
linear) × SES linear
43.82 -114.31 – 201.94 0.587
(CUE high gt low × SES
linear) × STIM quadratic
66.57 -71.76 – 204.89 0.345
(CUE high gt low × STIM
linear) × SES quadratic
-130.66 -268.94 – 7.62 0.064
(CUE high gt low × STIM
quadratic) × SES
quadratic
33.50 -87.47 – 154.46 0.587
Random Effects
σ2 107187.41
τ00 sub 60449.67
τ11 sub.CUE_high_gt_low 1888.25
τ11 sub.STIM_quadratic 7729.14
τ11 sub.SES_linear 33644.96
ρ01 0.13
0.39
-0.24
ICC 0.39
N sub 44
Observations 2376
Marginal R2 / Conditional R2 0.018 / 0.399
# Render the fitted SIIPS model as a LaTeX equation (output shown below).
equatiomatic::extract_eq(model.SIIPSses)

\[ \begin{aligned} \operatorname{SIIPS}_{i} &\sim N \left(\mu, \sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1j[i]}(\operatorname{CUE\_high\_gt\_low}) + \beta_{2}(\operatorname{STIM\_linear}) + \beta_{3j[i]}(\operatorname{SES\_linear}) + \beta_{4j[i]}(\operatorname{STIM\_quadratic}) + \beta_{5}(\operatorname{SES\_quadratic}) + \beta_{6}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_linear}) + \beta_{7}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear}) + \beta_{8}(\operatorname{SES\_linear} \times \operatorname{STIM\_linear}) + \beta_{9}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{STIM\_quadratic}) + \beta_{10}(\operatorname{SES\_linear} \times \operatorname{STIM\_quadratic}) + \beta_{11}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic}) + \beta_{12}(\operatorname{SES\_quadratic} \times \operatorname{STIM\_linear}) + \beta_{13}(\operatorname{SES\_quadratic} \times \operatorname{STIM\_quadratic}) + \beta_{14}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear} \times \operatorname{STIM\_linear}) + \beta_{15}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_linear} \times \operatorname{STIM\_quadratic}) + \beta_{16}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic} \times \operatorname{STIM\_linear}) + \beta_{17}(\operatorname{CUE\_high\_gt\_low} \times \operatorname{SES\_quadratic} \times \operatorname{STIM\_quadratic}) \\ \left( \begin{array}{c} \begin{aligned} &\alpha_{j} \\ &\beta_{1j} \\ &\beta_{3j} \\ &\beta_{4j} \end{aligned} \end{array} \right) &\sim N \left( \left( \begin{array}{c} \begin{aligned} &\mu_{\alpha_{j}} \\ &\mu_{\beta_{1j}} \\ &\mu_{\beta_{3j}} \\ &\mu_{\beta_{4j}} \end{aligned} \end{array} \right) , \left( \begin{array}{cccc} \sigma^2_{\alpha_{j}} & \rho_{\alpha_{j}\beta_{1j}} & \rho_{\alpha_{j}\beta_{3j}} & \rho_{\alpha_{j}\beta_{4j}} \\ \rho_{\beta_{1j}\alpha_{j}} & \sigma^2_{\beta_{1j}} & \rho_{\beta_{1j}\beta_{3j}} & 
\rho_{\beta_{1j}\beta_{4j}} \\ \rho_{\beta_{3j}\alpha_{j}} & \rho_{\beta_{3j}\beta_{1j}} & \sigma^2_{\beta_{3j}} & \rho_{\beta_{3j}\beta_{4j}} \\ \rho_{\beta_{4j}\alpha_{j}} & \rho_{\beta_{4j}\beta_{1j}} & \rho_{\beta_{4j}\beta_{3j}} & \sigma^2_{\beta_{4j}} \end{array} \right) \right) \text{, for sub j = 1,} \dots \text{,J} \end{aligned} \]

Session is inversely collinear with stimulus intensity, which makes the random slopes impossible to estimate.

library(ggplot2); theme_set(theme_bw())
library(ggalt)
# Per-subject encircled scatter of SIIPS by stimulus contrast, colored by cue.
# NOTE(review): the third positional argument to aes() (SES_linear) is not
# mapped to any named aesthetic and appears to be ignored -- confirm intent
# (facet? group?). scale_y_log10() drops non-positive SIIPS values, which is
# what produces the NaN / infinite-value warnings below.
(ggplot(df_filter_ses, aes(STIM_linear,SIIPS,SES_linear,colour=CUE_high_gt_low))
    +geom_point()
    +geom_encircle(aes(group=sub))
    +theme(legend.position="none")
    + scale_y_log10()
)
Warning in transformation$transform(x): NaNs produced
Warning in scale_y_log10(): log-10 transformation introduced infinite values.
Warning in transformation$transform(x): NaNs produced
Warning in scale_y_log10(): log-10 transformation introduced infinite values.
Warning: Removed 159 rows containing missing values or values outside the scale range (`geom_point()`).
Warning: Removed 159 rows containing missing values or values outside the scale range (`geom_encircle()`).

linear estimator

# Optimizer sweep for the SIIPS session model: try each optimx method until
# one fits without lme4 convergence messages (nlminb succeeds -- see output).
# Tolerances here are deliberately looser (1e-3) than in the NPS sweep.
# NOTE(review): maxfun/maxeval/xtol_abs/ftol_abs are not recognized by every
# optimx method and produce the "unrecognized control" warnings below.
optimx_options <- c("L-BFGS-B", "nlminb", "nlm", "bobyqa", "nmkb", "hjkb")
for (opt_method in optimx_options) {
  print(paste0("Testing optimx: ", opt_method))
  model_flex <- lmer(
    SIIPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      (1 + CUE_high_gt_low + STIM_quadratic + SES_linear |
         sub),
    data = df_filter_ses,
    control = lmerControl(
      optimizer = "optimx",
      optCtrl = list(
        method = opt_method,
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e3,
        xtol_abs = 1e-3,
        ftol_abs = 1e-3
      )
    )
  )

  # An empty message slot means lme4 flagged no convergence problems.
  if (is.null(model_flex@optinfo$conv$lme4$messages)) {
    print(paste0("One of the optimx options, ", opt_method, ", worked!"))
    print(model_flex)
    break
  }
}
[1] "Testing optimx: L-BFGS-B"
Warning in optim(par = par, fn = ufn, gr = ugr, lower = lower, upper = upper, : unknown names in control: maxfun, maxeval, xtol_abs,
ftol_abs
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.00679432 (tol =
0.002, component 1)
[1] "Testing optimx: nlminb"
Warning in nlminb(start = par, objective = ufn, gradient = ugr, lower = lower, : unrecognized control elements named 'maxfun', 'maxeval',
'xtol_abs', 'ftol_abs' ignored
[1] "One of the optimx options, nlminb, worked!"
Linear mixed model fit by REML ['lmerModLmerTest']
Formula: SIIPS ~ CUE_high_gt_low * STIM_linear * SES_linear + CUE_high_gt_low *  
    STIM_quadratic * SES_linear + CUE_high_gt_low * STIM_linear *      SES_quadratic + CUE_high_gt_low * STIM_quadratic * SES_quadratic +  
    (1 + CUE_high_gt_low + STIM_quadratic + SES_linear | sub)
   Data: df_filter_ses
REML criterion at convergence: 34296.09
Random effects:
 Groups   Name            Std.Dev. Corr             
 sub      (Intercept)     245.91                    
          CUE_high_gt_low  43.44    0.13            
          STIM_quadratic   87.92    0.39  0.08      
          SES_linear      183.42   -0.24  0.23 -0.94
 Residual                 327.39                    
Number of obs: 2376, groups:  sub, 44
Fixed Effects:
                                 (Intercept)                               CUE_high_gt_low                                   STIM_linear  
                                     545.355                                        14.103                                       121.136  
                                  SES_linear                                STIM_quadratic                                 SES_quadratic  
                                       8.267                                        20.104                                         8.921  
                 CUE_high_gt_low:STIM_linear                    CUE_high_gt_low:SES_linear                        STIM_linear:SES_linear  
                                     -19.787                                       -18.558                                        30.159  
              CUE_high_gt_low:STIM_quadratic                     SES_linear:STIM_quadratic                 CUE_high_gt_low:SES_quadratic  
                                      -5.768                                       -35.824                                        30.689  
                   STIM_linear:SES_quadratic                  STIM_quadratic:SES_quadratic        CUE_high_gt_low:STIM_linear:SES_linear  
                                      18.933                                        60.720                                        43.819  
   CUE_high_gt_low:SES_linear:STIM_quadratic     CUE_high_gt_low:STIM_linear:SES_quadratic  CUE_high_gt_low:STIM_quadratic:SES_quadratic  
                                      66.566                                      -130.658                                        33.497  
optimizer (optimx) convergence code: 0 (OK) ; 1 optimizer warnings; 0 lme4 warnings 
# Non-linear-optimizer sweep: try each nloptwrap (NLopt) algorithm until one
# fits the SIIPS model (ML, not REML) without lme4 convergence messages.
algoptions <- c(
  "NLOPT_LN_PRAXIS", "NLOPT_GN_CRS2_LM",
  "NLOPT_LN_COBYLA", "NLOPT_LN_NEWUOA",
  "NLOPT_LN_NEWUOA_BOUND", "NLOPT_LN_NELDERMEAD",
  "NLOPT_LN_SBPLX", "NLOPT_LN_BOBYQA"
)

for (alg in algoptions) {
  print(paste0("Testing nloptwrap: ", alg))
  model_flex <- lmer(
    SIIPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      (1 + CUE_high_gt_low + STIM_quadratic + SES_linear |
         sub),
    data = df_filter_ses, REML = FALSE,
    control = lmerControl(
      optimizer = "nloptwrap",
      optCtrl = list(
        algorithm = alg,
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e9,
        xtol_abs = 1e-2,
        ftol_abs = 1e-2
      )
    )
  )

  # An empty message slot means lme4 flagged no convergence problems.
  if (is.null(model_flex@optinfo$conv$lme4$messages)) {
    print(paste0("One of the nloptwrap options, ", alg, ", worked!"))
    print(model_flex)
    break
  }
}
[1] "Testing nloptwrap: NLOPT_LN_PRAXIS"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 1 negative
eigenvalues
Warning: Model failed to converge with 1 negative eigenvalue: -1.0e+01
[1] "Testing nloptwrap: NLOPT_GN_CRS2_LM"
Warning in optwrap(optimizer, devfun, getStart(start, rho$pp), lower = rho$lower, : convergence code -2 from nloptwrap: NLOPT_INVALID_ARGS:
Invalid arguments (e.g. lower bounds are bigger than upper bounds, an unknown algorithm was specified, etcetera).
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 3 negative
eigenvalues
Warning: Model failed to converge with 3 negative eigenvalues: -4.8e-01 -3.4e+01 -4.7e+01
[1] "Testing nloptwrap: NLOPT_LN_COBYLA"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.322768 (tol =
0.002, component 1)
[1] "Testing nloptwrap: NLOPT_LN_NEWUOA"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 1.37552 (tol =
0.002, component 1)
[1] "Testing nloptwrap: NLOPT_LN_NEWUOA_BOUND"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 1 negative
eigenvalues
Warning: Model failed to converge with 1 negative eigenvalue: -2.2e+01
[1] "Testing nloptwrap: NLOPT_LN_NELDERMEAD"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge with max|grad| = 0.283849 (tol =
0.002, component 1)
[1] "Testing nloptwrap: NLOPT_LN_SBPLX"
boundary (singular) fit: see help('isSingular')
[1] "Testing nloptwrap: NLOPT_LN_BOBYQA"
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : unable to evaluate scaled gradient
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model failed to converge: degenerate Hessian with 1 negative
eigenvalues
Warning: Model failed to converge with 1 negative eigenvalue: -4.9e+01
  # Alternative fit with glmmTMB using random slopes for the linear terms.
  # NOTE(review): produces a non-positive-definite Hessian (warning below);
  # the slope variances collapse to ~0 in the summary, consistent with the
  # collinearity noted above.
  model.glmm <- glmmTMB::glmmTMB(    SIIPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      ( CUE_high_gt_low + SES_linear  + STIM_linear  |         sub),
                     data = df_filter_ses)
Warning in finalizeTMB(TMBStruc, obj, fit, h, data.tmb.old): Model convergence problem; non-positive-definite Hessian matrix. See
vignette('troubleshooting')
# glmmTMB estimates (AIC/logLik are NA because of the Hessian failure above).
summary(model.glmm)
 Family: gaussian  ( identity )
Formula:          SIIPS ~ CUE_high_gt_low * STIM_linear * SES_linear + CUE_high_gt_low *  
    STIM_quadratic * SES_linear + CUE_high_gt_low * STIM_linear *      SES_quadratic + CUE_high_gt_low * STIM_quadratic * SES_quadratic +  
    (CUE_high_gt_low + SES_linear + STIM_linear | sub)
Data: df_filter_ses

     AIC      BIC   logLik deviance df.resid 
      NA       NA       NA       NA     2347 

Random effects:

Conditional model:
 Groups   Name            Variance  Std.Dev.  Corr              
 sub      (Intercept)     5.496e+04 2.344e+02                   
          CUE_high_gt_low 4.959e-10 2.227e-05  1.00             
          SES_linear      1.737e-20 1.318e-10 -0.89 -0.87       
          STIM_linear     2.643e-15 5.141e-08 -0.79 -0.77  0.98 
 Residual                 1.114e+05 3.338e+02                   
Number of obs: 2376, groups:  sub, 44

Dispersion estimate for gaussian family (sigma^2): 1.11e+05 

Conditional model:
                                              Estimate Std. Error z value Pr(>|z|)    
(Intercept)                                   551.6641    36.1360  15.266  < 2e-16 ***
CUE_high_gt_low                                15.5396    13.7004   1.134   0.2567    
STIM_linear                                   121.1456    16.7795   7.220  5.2e-13 ***
SES_linear                                     -8.1260    18.9826  -0.428   0.6686    
STIM_quadratic                                 23.8181    14.6783   1.623   0.1047    
SES_quadratic                                   0.9281    15.5296   0.060   0.9523    
CUE_high_gt_low:STIM_linear                   -19.9400    33.5590  -0.594   0.5524    
CUE_high_gt_low:SES_linear                    -17.6618    33.5642  -0.526   0.5987    
STIM_linear:SES_linear                         29.2349    41.1076   0.711   0.4770    
CUE_high_gt_low:STIM_quadratic                 -5.5709    29.3566  -0.190   0.8495    
SES_linear:STIM_quadratic                     -47.7892    35.9598  -1.329   0.1839    
CUE_high_gt_low:SES_quadratic                  29.0409    29.3520   0.989   0.3225    
STIM_linear:SES_quadratic                      18.6771    35.9488   0.520   0.6034    
STIM_quadratic:SES_quadratic                   57.0768    31.4470   1.815   0.0695 .  
CUE_high_gt_low:STIM_linear:SES_linear         40.6604    82.2151   0.495   0.6209    
CUE_high_gt_low:SES_linear:STIM_quadratic      66.4087    71.9196   0.923   0.3558    
CUE_high_gt_low:STIM_linear:SES_quadratic    -130.8349    71.8975  -1.820   0.0688 .  
CUE_high_gt_low:STIM_quadratic:SES_quadratic   34.8856    62.8941   0.555   0.5791    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
dv <- "SIIPS"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
SIIPS_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2, "ses"), dv)
SIIPS_groupwise <- summarySEwithin(
  data = SIIPS_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2, "ses"),
  idvar = subject
)
Automatically converting the following non-factors to factors: ses
SIIPS_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "SIIPS"
ylim <- c(-10, 60)
dv_keyword <- "siips"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
---
title: "ch4_painprocessing"
output: html_document
date: "2025-07-17"
---

```{r libraries_SIIPS_stim, message=FALSE, warning=FALSE, include=FALSE, paged.print=FALSE}
# Load analysis libraries and source project-local utility functions.
library(car)
library(cueR)
library(lme4)
library(optimx)
library(minqa)
library(dfoptim)
library(tidyverse)
library(psych)
library(reshape)
library(dplyr)
# library(tidyselect)
library(tidyr)
library(stringr)
library(lmerTest)
library(gghalves)
# NOTE(review): plyr loaded after dplyr masks several dplyr verbs (e.g.
# summarise); confirm this load order is intentional.
library(plyr)
library(ggpubr)
library(r2mlm)
library(effectsize)
# library(devtools)
options(es.use_symbols = TRUE) # get nice symbols when printing! (On Windows, requires R >= 4.2.0)
library(EMAtools)
library(emmeans)
library(sjPlot)
library(sjmisc)
library(sjlabelled)
library(plotly)
library(DT)
library(scico)
library(raincloudplots)
# devtools::source_url("https://raw.githubusercontent.com/RainCloudPlots/RainCloudPlots/master/tutorial_R/R_rainclouds.R")
# devtools::source_url("https://raw.githubusercontent.com/RainCloudPlots/RainCloudPlots/master/tutorial_R/summarySE.R")
library(broom)
library(MuMIn)
# devtools::source_url("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")
# (duplicate `library(r2mlm)` removed; it is already loaded above)

# Project root: two levels above this Rmd's working directory.
main_dir <- dirname(dirname(getwd()))
# Source every utility script under scripts/step02_R/utils into the global env.
file.sources <- list.files(file.path(main_dir, 'scripts', 'step02_R', 'utils'),
                          pattern="*.R",
                          full.names=TRUE,
                          ignore.case=TRUE)
invisible(lapply(file.sources, source, .GlobalEnv))

```

```{r}
# Sanity check: print the inferred project root (two levels above getwd()).
dirname(dirname(getwd()))
```

### Prepare dataframe

```{r}
analysis_folder <- "fmri_nps_canlab"

# Behavioral single-trial events: one row per trial, all subjects and tasks.
beh <- readr::read_tsv(file.path(main_dir, "analysis/fmri/nilearn/singletrial_rampupplateau/beh/sub-all_task-all_events.tsv"))
dv <- "nps"
base_dir <- file.path(main_dir, "analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab")
all_nps <- read.csv(file.path(base_dir, "signature-NPS_sub-all_runtype-pvc_event-stimulus.csv"))
all_siips <-  read.csv(file.path(base_dir, "signature-SIIPS_sub-all_runtype-pain_event-stimulus.csv"))

############# NOTE: run once; concatenate all csv files ########################
# file_paths <- list.files(base_dir, pattern =  "sub-.*_signature-NPSroi_.*\\.csv$", full.names = TRUE, recursive = TRUE)
# all_nps <- file_paths %>%
#   map_dfr(~read_csv(.x, show_col_types = FALSE), .id = "file_path")
#
# file_paths <- list.files(base_dir, pattern =  "sub-.*_roi-SIIPS.*\\.csv$", full.names = TRUE, recursive = TRUE)
# all_siips <- file_paths %>%
#   map_dfr(~read_csv(.x, show_col_types = FALSE), .id = "file_path")
################################################################################

# Merge behavior with NPS on the single-trial filename, then parse the
# BIDS-style fields (sub/ses/run/...) back out of that filename.
df_merge <- inner_join(beh, all_nps, by = "singletrial_fname")
df_merge <- df_merge %>%
  mutate(singletrial_fname = as.character(singletrial_fname)) %>%
  extract(singletrial_fname, into = c("sub", "ses", "run", "runtype", "event", "trial", "cuetype", "stimintensity"),
          # FIX: escape the final dot so ".nii.gz" is matched literally
          # (previously "\\.nii.gz$" let the second dot match any character).
          regex = "^(sub-\\d+)_(ses-\\d+)_(run-\\d+)_runtype-(pain|vicarious|cognitive)_event-(cue|stimulus)_trial-(\\d+)_cuetype-(high|low)(?:_stimintensity-(high|med|low))?\\.nii\\.gz$",
          remove = FALSE)

# Adjust the dataframe to handle NA for missing stimintensity, if tidyr version doesn't support `fill`
df_merge$stimintensity[df_merge$stimintensity == ""] <- NA

data <-df_merge[df_merge$runtype == "pain" ,]

# contrast code ________________________________________________________________
data$stim <- NA; data$STIM_linear <- NA; data$STIM_quadratic <- NA;
data$CUE_high_gt_low <- NA;
data$SES_linear <- NA;data$SES_quadratic <- NA
# Effect-coded stimulus intensity: low = -0.5, med = 0, high = +0.5.
data$stim[data$stimulusintensity == "low_stim"] <-  -0.5
data$stim[data$stimulusintensity == "med_stim"] <- 0
data$stim[data$stimulusintensity == "high_stim"] <-  0.5

data$STIM <- factor(data$stimulusintensity)

# contrast code 1 linear
data$STIM_linear[data$stimulusintensity == "low_stim"] <- -0.5
data$STIM_linear[data$stimulusintensity == "med_stim"] <- 0
data$STIM_linear[data$stimulusintensity == "high_stim"] <- 0.5

# contrast code 2 quadratic
data$STIM_quadratic[data$stimulusintensity == "low_stim"] <- -0.33
data$STIM_quadratic[data$stimulusintensity == "med_stim"] <- 0.66
data$STIM_quadratic[data$stimulusintensity == "high_stim"] <- -0.33

# social cue contrast (effect-coded: low = -0.5, high = +0.5)
data$CUE_high_gt_low[data$cue == "low_cue"] <-  -0.5
data$CUE_high_gt_low[data$cue == "high_cue"] <-  0.5

data$EXPECT <- data$expectrating
data$OUTCOME <- data$outcomerating


# Session contrasts over ses-01 / ses-03 / ses-04: linear and quadratic.
data$SES_linear[data$ses == "ses-01"] <- -0.5
data$SES_linear[data$ses == "ses-03"] <- 0
data$SES_linear[data$ses == "ses-04"] <- 0.5

# contrast code 2 quadratic
data$SES_quadratic[data$ses == "ses-01"] <- -0.33
data$SES_quadratic[data$ses == "ses-03"] <- 0.66
data$SES_quadratic[data$ses == "ses-04"] <- -0.33

stim_con1 <- "STIM_linear"
stim_con2 <- "STIM_quadratic"
iv1 <- "CUE_high_gt_low"
dv <- "nps"
dv_keyword <- "NPS"
```

```{r}
# load dataframes ______________________________________________________________

# SIIPS <- siips %>%
#   select(-X)
# NPS <- nps %>%
#   select(-X)
beh <- readr::read_tsv(file.path(main_dir, "analysis/fmri/nilearn/singletrial_rampupplateau/beh/sub-all_task-all_events.tsv"))
# base_dir <- "/Volumes/spacetop_projects_cue/analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab"
NPS <- read.csv(file.path(main_dir, "analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab/signature-NPS_sub-all_runtype-pvc_event-stimulus.csv"))
SIIPS <-  read.csv(file.path(main_dir, "analysis/fmri/nilearn/deriv01_signature/rampup_plateau_canlab/signature-SIIPS_sub-all_runtype-pain_event-stimulus.csv"))

# merge the three dataframes ___________________________________________________
df_merge1 <- inner_join(beh, NPS, by = "singletrial_fname")
dfmerge <- inner_join(df_merge1, SIIPS, by = "singletrial_fname")

# Parse the BIDS-style fields (sub/ses/run/...) out of the filename.
dfmerge <- dfmerge %>%
  mutate(singletrial_fname = as.character(singletrial_fname)) %>%
  extract(singletrial_fname, into = c("sub", "ses", "run", "runtype", "event", "trial", "cuetype", "stimintensity"),
          # FIX: escape the final dot so ".nii.gz" is matched literally.
          regex = "^(sub-\\d+)_(ses-\\d+)_(run-\\d+)_runtype-(pain|vicarious|cognitive)_event-(cue|stimulus)_trial-(\\d+)_cuetype-(high|low)(?:_stimintensity-(high|med|low))?\\.nii\\.gz$",
          remove = FALSE)

dfmerge$NPS <- dfmerge$nps
# create folder to save data ___________________________________________________
analysis_dir <- "/Users/h/Documents/projects_local/cue_expectancy/resources/plots_dissertation/ch4"
dir.create(analysis_dir,
           showWarnings = FALSE,
           recursive = TRUE)
savedir <- analysis_dir
# Cue colors used across all plots below.
lowcuehex <-  "#4274AD" # "#575a5e"
highcuehex <-  "#C5263A" #"#FEAE00"
```

```{r}
# Subset to pain runs and build contrast codes for the mixed-effects models.
data <-dfmerge[dfmerge$runtype == "pain" ,]

# contrast code ________________________________________________________________
data$stim <- NA; data$STIM_linear <- NA; data$STIM_quadratic <- NA;
data$CUE_high_gt_low <- NA;
data$SES_linear <- NA;data$SES_quadratic <- NA
# Effect-coded stimulus intensity: low = -0.5, med = 0, high = +0.5.
data$stim[data$stimulusintensity == "low_stim"] <-  -0.5
data$stim[data$stimulusintensity == "med_stim"] <- 0
data$stim[data$stimulusintensity == "high_stim"] <-  0.5

data$STIM <- factor(data$stimulusintensity)
data$SUB <- factor(data$sub)

# undo string format
# Ratings arrive as character with "n/a" placeholders; convert to numeric NA.
data$outcomerating_impute[data$outcomerating_impute == "n/a"] <- NA
data$outcomerating_impute <- as.numeric(data$outcomerating_impute)

data$expectrating_impute[data$expectrating_impute == "n/a"] <- NA
data$expectrating_impute <- as.numeric(data$expectrating_impute)

# contrast code 1 linear
data$STIM_linear[data$stimulusintensity == "low_stim"] <- -0.5
data$STIM_linear[data$stimulusintensity == "med_stim"] <- 0
data$STIM_linear[data$stimulusintensity == "high_stim"] <- 0.5

# contrast code 2 quadratic
data$STIM_quadratic[data$stimulusintensity == "low_stim"] <- -0.33
data$STIM_quadratic[data$stimulusintensity == "med_stim"] <- 0.66
data$STIM_quadratic[data$stimulusintensity == "high_stim"] <- -0.33

# social cue contrast (effect-coded: low = -0.5, high = +0.5)
data$CUE_high_gt_low[data$cue == "low_cue"] <-  -0.5
data$CUE_high_gt_low[data$cue == "high_cue"] <-  0.5

data$EXPECT <- data$expectrating
data$OUTCOME <- data$outcomerating


# Session contrasts over ses-01 / ses-03 / ses-04: linear and quadratic.
data$SES_linear[data$ses == "ses-01"] <- -0.5
data$SES_linear[data$ses == "ses-03"] <- 0
data$SES_linear[data$ses == "ses-04"] <- 0.5

# contrast code 2 quadratic
data$SES_quadratic[data$ses == "ses-01"] <- -0.33
data$SES_quadratic[data$ses == "ses-03"] <- 0.66
data$SES_quadratic[data$ses == "ses-04"] <- -0.33

stim_con1 <- "STIM_linear"
stim_con2 <- "STIM_quadratic"
iv1 <- "CUE_high_gt_low"

# Drop subjects with fewer than 3 trials in any cue-x-stim cell so that
# per-cell subject means are estimable downstream.
subjects_with_inadequate_data <- data %>%
  group_by(sub, CUE_high_gt_low, STIM_linear) %>% #SES_linear,
  dplyr::summarise(count = n(), .groups = 'drop') %>%
  filter(count < 3) %>%
  distinct(sub) %>%
  pull(sub)
df_filter <- data %>%
  filter(!(sub %in% subjects_with_inadequate_data))

print(sprintf("after filtering out subjects that have less than 3 trials in cell, we have N=%d -> N=%d",length(unique(data$sub)), length(unique(df_filter$sub)) ))
```

## 1 Beh

### Beh linear analysis

```{r}
# Mixed-effects model: outcome rating ~ cue x stimulus (linear + quadratic),
# with by-subject random intercepts and slopes for cue and linear stimulus.
model.beh <- lmer(outcomerating_impute ~ CUE_high_gt_low*STIM_linear +
       CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low+STIM_linear|sub), data = df_filter)
summary(model.beh)
# HTML table of fixed effects for the rendered report.
sjPlot::tab_model(model.beh,
                  title = "Multilevel-modeling: \nlmer(Outcome ~ CUE * STIM + (CUE + STIM | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
# Render the model equation in LaTeX.
equatiomatic::extract_eq(model.beh)
```

### Beh lineplot

```{r}
# Line plots of outcome ratings by stimulus intensity and cue.
dv <- "outcomerating_impute"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name <- NA; df_filter$stim_name <- NA
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
# DATA$levels_ordered <- factor(DATA$param_stimulus_type, levels=c("low", "med", "high"))
# Ordered factors so axes read Low -> Med -> High and low cue -> high cue.
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Per-subject cell means, then within-subject SEs (project utility functions).
stimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
stimcue_groupwise <- summarySEwithin(
  data = stimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
stimcue_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
# NOTE(review): `se`, `ggtitle`, `title`, `xlab`, `ylab`, `ylim` shadow
# base/ggplot2 functions of the same names in the global environment --
# confirm this is intentional.
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "Outcome rating"
ylim <- c(-10, 60)
dv_keyword <- "outcomerating"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only line plot.
subset <- stimcue_groupwise[stimcue_groupwise$cue_ordered == "low cue",]
g <- plot_lineplot_twofactor(subset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = "Within pain task: Outcome rating",
                        xlab = "Stimulus intensity", ylab = "Outcome rating")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
   ylim(42, 87)
ggsave(file.path(analysis_dir, paste0("beh_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)


# Both-cues line plot.
k <- cueR::plot_lineplot_twofactor_subset(stimcue_groupwise,
                                    taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("Beh N =", length(unique(stimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "Outcome rating")
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
 ylim(42, 87)
k
ggsave(file.path(analysis_dir, paste0("beh_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)
```

## 2 NPS (df_filter)

### NPS linear analysis

```{r}
# Mixed-effects model: NPS ~ cue x stimulus (linear + quadratic), with
# by-subject random intercepts and slopes for cue and linear stimulus.
model.NPS <- lmer(NPS ~ CUE_high_gt_low*STIM_linear +
       CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low+STIM_linear|sub), data = df_filter)
summary(model.NPS)
sjPlot::tab_model(model.NPS,
                  title = "Multilevel-modeling: \nlmer(NPS ~ CUE * STIM + (CUE + STIM | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
equatiomatic::extract_eq(model.NPS)
# FIX: write the model summary next to the other outputs instead of a
# machine-specific Desktop path.
sink(file.path(analysis_dir, "NPS_output.txt"))
summary(model.NPS)
sink(NULL)  # end the output diversion
```

### lineplot

```{r}
# Line plots of NPS by stimulus intensity and cue (same recipe as the
# behavioral lineplot chunk above).
dv <- "NPS"
taskname <- "pain"
subject <- "sub"

# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
# DATA$levels_ordered <- factor(DATA$param_stimulus_type, levels=c("low", "med", "high"))
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Per-subject cell means, then within-subject SEs (project utility functions).
NPSstimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
NPSstimcue_groupwise <- summarySEwithin(
  data = NPSstimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
NPSstimcue_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "NPS"
ylim <- c(-10, 60)
dv_keyword <- "nps"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only line plot.
NPSsubset <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue",]
g <- plot_lineplot_twofactor(NPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = paste0("NPS N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPS")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
   ylim( 3,6)
ggsave(file.path(analysis_dir, paste0("NPS_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)


# Both-cues line plot.
k <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise, taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("NPS N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPS")
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22, ), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  # geom_line(size = 1) + # Adjust line thickness
  geom_point(size = 2) + # Adjust point size
  ylim( 4,6)
k
ggsave(file.path(analysis_dir, paste0("NPS_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)
```

### NPS pos (df_filter)

```{r}
# Line plots of the NPS positive-weight subregion score (npspos).
dv <- "npspos"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
NPSstimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
NPSstimcue_groupwise <- summarySEwithin(
  data = NPSstimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
NPSstimcue_groupwise$task <- taskname

# Plot 3: plot parameters  _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "NPSpos"
ylim <- c(-10, 60)
# FIX: keyword was "nps", which collided with the NPS section's raincloud
# filename; use "npspos" so plot_savefname is unique to this measure.
dv_keyword <- "npspos"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only line plot.
NPSsubset <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue",]
# FIX: title and y-axis previously said "NPS"; this plot shows NPSpos.
g <- plot_lineplot_twofactor(NPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = paste0("NPSpos N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPSpos")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  geom_point(size = 2) + # Adjust point size
   ylim( 3,6)
ggsave(file.path(analysis_dir, paste0("NPSpos_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)


# Both-cues line plot.
k <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise,taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("NPS (pos) N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPSpos")
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  geom_point(size = 2)
k
ggsave(file.path(analysis_dir, paste0("NPSpos_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)
```

### NPS neg

```{r}
# Line plots of the NPS negative-weight subregion score (npsneg).
dv <- "npsneg"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-
  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
NPSstimcue_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2), dv)
NPSstimcue_groupwise <- summarySEwithin(
  data = NPSstimcue_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
NPSstimcue_groupwise$task <- taskname

# Plot 3: plot parameters  _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "NPSneg"
ylim <- c(-10, 60)
# FIX: keyword was "nps", which collided with the NPS section's raincloud
# filename; use "npsneg" so plot_savefname is unique to this measure.
dv_keyword <- "npsneg"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
# Low-cue-only line plot.
NPSsubset <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue",]
# FIX: title previously said "NPS"; this plot shows NPSneg.
g <- plot_lineplot_twofactor(NPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex), ggtitle = paste0("NPSneg N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPSneg")
g <- g + theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  geom_point(size = 2) + # Adjust point size
   ylim( 3,6)
ggsave(file.path(analysis_dir, paste0("NPSneg_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)


# Both-cues line plot.
k <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise, taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex,"high cue" = highcuehex),
                        ggtitle = paste0("NPSneg N =", length(unique(NPSstimcue_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "NPSneg")
k<- k+ theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 22), # Axis titles
          axis.text = element_text(size = 15), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5) # Plot title
          ) +
  geom_point(size = 2)
k
ggsave(file.path(analysis_dir, paste0("NPSneg_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)
```

## 3 PE (PEmerge)

```{r}
# Reinforcement-learning model fits (per-trial prediction errors), pain task.
PEdf <- read.csv("/Users/h/Documents/projects_local/cue_expectancy/data/RL/modelfit_072024/table_pain.csv")


clean_filename <- function(filename) {
  # Normalize single-trial filenames so they match the behavioral data:
  # drop the redundant "_cue" suffix after the cuetype field, and the
  # duplicated "_stim_stim" suffix after the stimintensity field.
  cleaned <- gsub("(cuetype-[^_]+)_cue", "\\1", filename)
  gsub("(stimintensity-[^_]+)_stim_stim", "\\1", cleaned)
}

# Apply the function to the singletrial_fname column
PEdf$singletrial_fname <- sapply(PEdf$singletrial_fname, clean_filename)
# Attach RL prediction errors to the filtered trial-level data.
PEmerge<- inner_join(df_filter, PEdf, by = "singletrial_fname")
#
# PEmerge <- merge(dfmerge, PEdf, by = c("src_subject_id", "session_id", "param_run_num", "trial_index"))
# NOTE(review): this prints the entire merged dataframe into the report.
print(PEmerge)

# NPS ~ model-based prediction error, random intercepts per subject.
# nmkbw optimizer comes from the dfoptim package loaded above.
PE.model <- lmer(nps ~ PE_mdl2 + (1 | sub), data=PEmerge, control = lmerControl(optimizer = "nmkbw"))
summary(PE.model)
sjPlot::tab_model(PE.model,
                  title = "Multilevel-modeling: \nlmer(NPS ~ PE + (PE | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
equatiomatic::extract_eq(PE.model)
# NOTE(review): machine-specific absolute path -- consider analysis_dir.
sink("/Users/h/Desktop/PEnps_output.txt")  # Redirects output to 'my_output.txt'
summary(PE.model)
sink(NULL)  #

library(ggplot2)

min_value <- -40
max_value <- 40
PEmerge$sub <- factor(PEmerge$sub)
# Scatter of PE vs NPS with per-subject (dashed) and group (black) lm fits.
plot.PE_NPS <- ggplot(PEmerge, aes(x = PE_mdl2, y = NPS)) +
  geom_point(aes(colour = sub), size = .1) +  # Points colored by subject
  geom_smooth(aes(colour = sub), method = 'lm', formula = y ~ x, se = FALSE, size = .3, linetype = "dashed") +  # Subject-wise regression lines
  geom_smooth(method = 'lm', formula = y ~ x, se = FALSE, size = .5, color = "black") +  # Group regression line
  # ylim(min_value, max_value) +  # Set y-axis limits
  # xlim(min_value, max_value)
  theme_classic2()  +# Use a theme with a white background
xlab("PE") +
   theme(aspect.ratio=1,
          text = element_text(size = 18), # Default text size for the plot
          axis.title = element_text(size = 24, ), # Axis titles
          axis.text = element_text(size = 18), # Axis text (x and y)
          plot.title = element_text(size = 24, hjust = 0.5),# Plot title
         legend.position = "none"
          )
plot.PE_NPS
ggsave("/Users/h/Documents/projects_local/cue_expectancy/resources/plots_dissertation/ch4/ch4_NPSPE.png",plot.PE_NPS, w=5, h=4, dpi=300)

```

Q. How correlated are the outcome ratings and NPS values?
TODO: annotate each correlation with its statistical significance.

```{r}
library(corrplot)
# Numeric cue code (factor level index) and a behavioral prediction error
# (outcome minus expectation) for the correlation matrix.
PEmerge$cuetype_numeric <- as.numeric(as.factor(PEmerge$cuetype))
PEmerge$beh_PE <- PEmerge$outcomerating_impute - PEmerge$expectrating_impute

# Columns entering the correlation matrix.
cor_cols <- c("outcomerating_impute", "expectrating_impute", "PE_mdl2",
              "beh_PE", "nps", "npspos", "npsneg", "cuetype_numeric")
subset <- PEmerge[, cor_cols]
cor_matrix <- cor(subset, use = "pairwise.complete.obs")
corrplot(cor_matrix, method = "circle")

# Correlation between cuetype and model-based PE
correlation_model <- cor_matrix["cuetype_numeric", "PE_mdl2"]
print(correlation_model)

# Correlation between cuetype and behavioral PE
correlation_behavioral <- cor_matrix["cuetype_numeric", "beh_PE"]
print(correlation_behavioral)
```

### Tor suggestion lmer with PE

> What to do when model fit is singular
> https://stackoverflow.com/questions/54597496/how-to-cope-with-a-singular-fit-in-a-linear-mixed-model-lme4
> A singular fit means:
* one or more random-effect variances are estimated to be near zero,
* and the model is overfitting or overparameterized (it cannot estimate all the random slopes reliably).
> Suggestion: simplify the random-effects structure.

Q. Why would I have a singular random-effects structure?
A1. The model fits too many parameters: too many random slopes relative to the amount of data.
A2. The predictors are highly collinear (PE is similar to cue).

NPS pos is predicted by cue and stim
Once PE comes into the picture, matrix is singular
```{r message=TRUE, warning=TRUE}
summary(NPSPE)
```

```{r echo=TRUE}
#
# Earlier cue x stim model, kept for reference:
# model.NPS <- lmer(NPS ~ CUE_high_gt_low*STIM_linear +
#        CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low+STIM_linear|sub), data = df_filter)

# NPS ~ stimulus intensity (linear + quadratic) + model-based prediction error,
# with per-subject random intercepts and PE slopes. Cue is deliberately left
# out: PE and cue are highly collinear and including both made the fit
# singular (see the notes above this chunk).
NPSPE <- lmer(NPS ~ STIM_linear +
       STIM_quadratic  + PE_mdl2 + (1 + PE_mdl2   |sub),
     data = PEmerge)
     # control=lmerControl(isSingular(NPSPE, tol = 1e-4)))
       # check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
summary(NPSPE)
# FIX(review): table title now matches the fitted formula (the old label
# claimed cue fixed effects and (cue + stim | sub) random slopes that are not
# in this model).
sjPlot::tab_model(NPSPE,
                  title = "model 2: \nlmer(NPS ~ stim + PE + (1 + PE | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
equatiomatic::extract_eq(NPSPE)

library(sjPlot)
library(ggplot2)
library(dplyr)

# Subject-specific fitted values _______________________________________________
PEmerge$predicted_NPS <- predict(NPSPE)

# Sort within subject so each geom_line is drawn monotonically along PE.
PEmerge_sorted <- PEmerge %>%
  arrange(sub, PE_mdl2)

ggplot(PEmerge_sorted, aes(x = PE_mdl2, y = predicted_NPS, color = sub)) +
  geom_line(aes(group = sub), alpha = 0.4) +
  labs(title = "Subject-specific predicted slopes for Prediction Error",
       x = "Prediction Error (PE_mdl2)",
       y = "Predicted NPS") +
  theme_minimal(base_size = 14) +
  theme(legend.position = "none")

library(ggeffects)

# Marginal (partial) effect of PE across its observed range ____________________
pe_pred <- ggpredict(NPSPE, terms = "PE_mdl2")

ggplot(pe_pred, aes(x = x, y = predicted)) +
  geom_line() +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = 0.2) +
  labs(title = "Partial Effect of Prediction Error on NPS",
       x = "Prediction Error (PE_mdl2)",
       y = "Predicted NPS") +
  theme_minimal(base_size = 14)

# PE effect displayed at levels of stimulus intensity.
plot_model(NPSPE, type = "eff", terms = c( "STIM_linear","PE_mdl2"))

# # model 2: NPS PE positive values (kept for reference)
# NPSPEpos <- lmer(npspos~ cuetype + stimintensity + PE_mdl2 + (cuetype + stimintensity|sub),
#      data = PEmerge,
#      control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
# summary(NPSPEpos)
# vif(NPSPEpos)
# sjPlot::tab_model(NPSPEpos,
#                   title = "model 2: \nlmer(NPSpos ~ cue + stim + PE + (cue + stim | sub), data = pain)",
#                   CSS = list(css.table = '+font-size: 12;'))
# equatiomatic::extract_eq(NPSPEpos)
```

## 4 NPS nociceptive ROI (df_filter.NA)

```{r}
# STEP 0. filter data __________________________________________________________
# Make sure that each condition cell has an adequate number of trials.
dependent_vars <- c("nps","npspos","npsneg",
                    "pos_nps_vermis","pos_nps_rIns","pos_nps_rV1","pos_nps_rThal",
                    "pos_nps_lIns","pos_nps_rdpIns","pos_nps_rS2_Op","pos_nps_dACC",
                    "neg_nps_rLOC","neg_nps_lLOC","neg_nps_rpLOC","neg_nps_pgACC","neg_nps_lSTS","neg_nps_rIPL","neg_nps_PCC"  )
# Plot titles, index-matched to dependent_vars (negative-weight ROIs keep raw names).
newlist <- c("NPS","NPS positive weights","NPS negative weights",
                    "Vermis","rIns","rV1","rThal",
                    "lIns","rdpIns","rS2 & Op","dACC",
                    "neg_nps_rLOC","neg_nps_lLOC","neg_nps_rpLOC","neg_nps_pgACC","neg_nps_lSTS","neg_nps_rIPL","neg_nps_PCC"  )

# Subject exclusion does not depend on the DV, so compute it once.
# FIX(review): hoisted out of the loop — it was recomputed identically on
# every iteration. Drop subjects with < 3 trials in any cue x stimulus cell.
subjects_with_inadequate_data <- data %>%
  group_by(sub, CUE_high_gt_low, STIM_linear) %>% #SES_linear,
  dplyr::summarise(count = n(), .groups = 'drop') %>%
  filter(count < 3) %>%
  distinct(sub) %>%
  pull(sub)
df_filter <- data %>%
  filter(!(sub %in% subjects_with_inadequate_data))

print(sprintf("after filtering out subjects that have less than 3 trials in cell, we have N=%d -> N=%d",
              length(unique(data$sub)), length(unique(df_filter$sub))))

## QC. drop trials with missing NPS (also DV-independent) ______________________
df_filter.NA <- df_filter %>% filter(!is.na(nps))  # Remove NA values
head(df_filter.NA)

# Within-subject z-scoring of the NPS response.
df_filter.NA <- df_filter.NA %>%
  group_by(sub) %>%
  mutate(NPS_scaled = scale(nps)) %>%
  ungroup()

# 1. rename / reorder condition labels for plots _______________________________
df_filter.NA$cue_name <- NA; df_filter.NA$stim_name <- NA
df_filter.NA$cue_name[df_filter.NA$cue == "high_cue"] <- "high cue"
df_filter.NA$cue_name[df_filter.NA$cue == "low_cue"] <- "low cue"

df_filter.NA$stim_name[df_filter.NA$stimulusintensity == "high_stim"] <- "High"
df_filter.NA$stim_name[df_filter.NA$stimulusintensity == "med_stim"] <- "Med"
df_filter.NA$stim_name[df_filter.NA$stimulusintensity == "low_stim"] <- "Low"

df_filter.NA$stim_ordered <- factor(df_filter.NA$stim_name, levels = c("Low", "Med", "High"))
df_filter.NA$cue_ordered <- factor(df_filter.NA$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Shared plotting / bookkeeping parameters _____________________________________
taskname <- "pain"
ggtitle <- paste(taskname, " - NPS (degree)")
title <- paste(taskname, " - actual")
subject <- "sub"
w <- 10
h <- 6

analysis_dir <-
  file.path(main_dir,
            "analysis",
            "mixedeffect",
            analysis_folder,
            as.character(Sys.Date())) # nolint
dir.create(analysis_dir,
           showWarnings = FALSE,
           recursive = TRUE)
savedir <- analysis_dir

# Initialize ONCE so group-wise summaries accumulate across all DVs.
# FIX(review): this was previously ALSO re-initialized inside the loop, which
# wiped the accumulated rows on every iteration and defeated the rbind below.
combined_se_calc_cooksd <- data.frame()

for (i in seq_along(dependent_vars)) {

  dv <- dependent_vars[i]
  dv_title <- newlist[i]
  print(paste("_______", i, dv, "_______"))

  # 2. lmer model ______________________________________________________________
  model_savefname <- file.path(
    analysis_dir,
    paste("lmer_nps-", dv, "_", as.character(Sys.Date()), ".txt", sep = "")
  )
  formula_string <- paste(dv, "~ CUE_high_gt_low * STIM_linear + CUE_high_gt_low * STIM_quadratic + (CUE_high_gt_low + STIM_linear | sub)")
  model_formula <- as.formula(formula_string)
  # Fit the model. Singular-fit warnings are suppressed (tol = 1e-4), allowing
  # zero variance estimates. Proceed with caution:
  # https://stackoverflow.com/questions/54597496/how-to-cope-with-a-singular-fit-in-a-linear-mixed-model-lme4
  sink(model_savefname)
  model.npscuestim <- lmer(model_formula, data = df_filter.NA,
                           control = lmerControl(check.conv.singular = .makeCC(action = "ignore", tol = 1e-4)))
  print(summary(model.npscuestim))
  sink()

  # Check why convergence is negative __________________________________________
  # NOTE(review): glance() on an allFit object needs broom.mixed attached —
  # confirm it is loaded earlier in the file.
  aa <- lme4::allFit(model.npscuestim)
  opt.summary <- glance(aa) |> select(optimizer, AIC, NLL_rel) |> arrange(NLL_rel)
  print(opt.summary)

  # R2 _________________________________________________________________________
  r_squared <- MuMIn::r.squaredGLMM(model.npscuestim)
  R2m <- r_squared[1] # Marginal R-squared
  R2c <- r_squared[2] # Conditional R-squared

  # Summary statistics: per-subject means, then within-subject SEs _____________
  NPSstimcue_subjectwise <- meanSummary(df_filter.NA,
                                        c(subject, model_iv1, model_iv2), dv)
  NPSstimcue_groupwise <- summarySEwithin(
    data = NPSstimcue_subjectwise,
    measurevar = "mean_per_sub",
    withinvars = c(model_iv1, model_iv2),
    idvar = subject
  )
  NPSstimcue_groupwise$task <- taskname
  # https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402
  combined_se_calc_cooksd <-
    rbind(combined_se_calc_cooksd, NPSstimcue_groupwise)

  # Plot parameters ____________________________________________________________
  sub_mean <- "mean_per_sub"
  group_mean <- "mean_per_sub_norm_mean"
  se <- "se"
  ggtitle <- paste(taskname, " - NPS Cooksd removed")
  title <- paste(taskname, " - NPS")
  xlab <- ""
  ylab <- "NPS (degree)"
  ylim <- c(-1, 1)
  dv_keyword <- "NPS"
  if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
    color <- c("#1B9E77", "#D95F02")
  } else {
    color <- c("#4274AD", "#C5263A")
  } # if keyword starts with
  plot_savefname <- file.path(
    analysis_dir,
    paste("raincloud_task-", taskname, "_rating-", dv_keyword, "_",
          as.character(Sys.Date()), "_cooksd.png", sep = "")
  )

  ### Lineplot with only low cue _______________________________________________
  subsetNPS <- NPSstimcue_groupwise[NPSstimcue_groupwise$cue_ordered == "low cue", ]
  g <- cueR::plot_lineplot_twofactor_subset(subsetNPS, taskname = "pain",
                          iv1 = "stim_ordered", iv2 = "cue_ordered",
                          mean = "mean_per_sub_norm_mean", error = "se",
                          color = c("low cue" = "#4274AD", "#D73027"),
                          ggtitle = dv_title,
                          xlab = "Stimulus intensity", ylab = "Mean score")
  g + theme(aspect.ratio = .8)

  ### Lineplot, both cues ______________________________________________________
  g <- cueR::plot_lineplot_twofactor_subset(NPSstimcue_groupwise, taskname = "pain",
                          iv1 = "stim_ordered", iv2 = "cue_ordered",
                          mean = "mean_per_sub_norm_mean", error = "se",
                          color = c("low cue" = "#4274AD", "high cue" = "#C5263A"),
                          ggtitle = dv_title,
                          xlab = "Stimulus intensity", ylab = "Mean score")
  g <- g + theme(aspect.ratio = 1,
                 text = element_text(size = 18), # Default text size for the plot
                 axis.title = element_text(size = 22), # Axis titles
                 axis.text = element_text(size = 15), # Axis text (x and y)
                 plot.title = element_text(size = 24, hjust = 0.5) # Plot title
                 ) +
    geom_point(size = 2) # Adjust point size

  plot_filename <- file.path(analysis_dir,
                             paste('lineplot_task-all_rating-', dv_keyword, dv, '.svg', sep = ""))
  print(g)
  ggsave(plot_filename, g, width = 8, height = 4, dpi = 300)
}
```

## 5 SIIPS (df_filter.NA)

### SIIPS linear analysis

```{r}


# SIIPS ~ full cue x stimulus (linear + quadratic) model, with within-subject
# random slopes for the full cue x linear-stimulus interaction.
# Singular-fit warnings are suppressed (tol = 1e-4); proceed with caution.
model.SIIPS <- lmer(SIIPS ~ CUE_high_gt_low*STIM_linear +
       CUE_high_gt_low*STIM_quadratic + (CUE_high_gt_low*STIM_linear|sub),
       data = df_filter.NA,
       control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
summary(model.SIIPS)
# FIX(review): title now matches the fitted random-effects structure
# (the old label said "(CUE + STIM | sub)" but the model uses "(CUE * STIM | sub)").
sjPlot::tab_model(model.SIIPS,
                  title = "Multilevel-modeling: \nlmer(SIIPS ~ CUE * STIM + (CUE * STIM | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))

# NOTE(review): hardcoded user-specific path; consider writing into
# analysis_dir like the other outputs in this file.
sink("/Users/h/Desktop/SIIPS_output.txt")  # Redirect console output to file
summary(model.SIIPS)
equatiomatic::extract_eq(model.SIIPS)
sink(NULL)  # Restore output to the console
```

```{r}
dv <- "SIIPS"
taskname <- "pain"
subject <- "sub"
df_filter$stim_name <- NA; df_filter$cue_name <- NA
# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name[df_filter$cue == "high_cue"] <- "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <- "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <- "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <- "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <- "Low"

df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics (per-subject means, within-subject SEs) ___________
SIIPS_subjectwise <- meanSummary(df_filter,
                                 c(subject, model_iv1, model_iv2), dv)
SIIPS_groupwise <- summarySEwithin(
  data = SIIPS_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2),
  idvar = subject
)
SIIPS_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "SIIPS"
ylim <- c(-10, 60)
dv_keyword <- "siips"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste("raincloud_task-", taskname, "_rating-", dv_keyword, "_",
        as.character(Sys.Date()), ".png", sep = "")
)

### Lineplot, low cue only _____________________________________________________
SIIPSsubset <- SIIPS_groupwise[SIIPS_groupwise$cue_ordered == "low cue", ]
# FIX(review): title previously read "NPS N =" although this plot shows SIIPS
# (the both-cue plot below already said "SIIPS N =").
g <- plot_lineplot_twofactor(SIIPSsubset,
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex),
                        ggtitle = paste0("SIIPS N =", length(unique(SIIPS_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "SIIPS")
g <- g + theme(aspect.ratio = 1,
               text = element_text(size = 18), # Default text size for the plot
               axis.title = element_text(size = 22), # Axis titles
               axis.text = element_text(size = 15), # Axis text (x and y)
               plot.title = element_text(size = 24, hjust = 0.5) # Plot title
               ) +
  geom_point(size = 2) + # Adjust point size
  # NOTE(review): fixed y-range 465-650 — confirm this matches the SIIPS scale.
  ylim(465, 650)
ggsave(file.path(analysis_dir, paste0("SIIPS_maineffect", as.character(Sys.Date()), "_lowcue.svg")), plot=g, w=4, h=4, dpi=300)

### Lineplot, both cues ________________________________________________________
k <- cueR::plot_lineplot_twofactor_subset(SIIPS_groupwise, taskname = "pain",
                        iv1 = "stim_ordered", iv2 = "cue_ordered",
                        mean = "mean_per_sub_norm_mean", error = "se",
                        color = c("low cue" = lowcuehex, "high cue" = highcuehex),
                        ggtitle = paste0("SIIPS N =", length(unique(SIIPS_subjectwise$sub))),
                        xlab = "Stimulus intensity", ylab = "SIIPS")
k <- k + theme(aspect.ratio = 1,
               text = element_text(size = 18), # Default text size for the plot
               axis.title = element_text(size = 22), # Axis titles
               axis.text = element_text(size = 15), # Axis text (x and y)
               plot.title = element_text(size = 24, hjust = 0.5) # Plot title
               ) +
  geom_point(size = 2) + # Adjust point size
  ylim(465, 650)
k
ggsave(file.path(analysis_dir, paste0("SIIPS_maineffect", as.character(Sys.Date()), ".svg")), plot=k, w=4, h=4, dpi=300)
```

## 5-3 SIIPS and PE (PEmerge)

```{r}

SIIPSPE <- lmer(SIIPS ~ CUE_high_gt_low + STIM_linear + PE_mdl2 + (CUE_high_gt_low + STIM_linear   |sub),
     data = PEmerge,
     control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
summary(SIIPSPE)
vif(SIIPSPE)
```

## Session wise filter

```{r}
# Session-wise filter: require at least 4 trials per
# sub x cue x stim x session cell; drop subjects failing any cell.
subjects_with_inadequate_data <- data %>%
  group_by(sub, CUE_high_gt_low, STIM_linear, SES_linear) %>%
  dplyr::summarise(count = n(), .groups = 'drop') %>%
  filter(count < 4) %>%
  distinct(sub) %>%
  pull(sub)
df_filter_ses <- data %>%
  filter(!(sub %in% subjects_with_inadequate_data))

# FIX(review): message previously said "less than 3" while the filter uses < 4.
print(sprintf("after filtering out subjects that have less than 4 trials in cell, we have N=%d -> N=%d",
              length(unique(data$sub)), length(unique(df_filter_ses$sub))))
```

## 4 Sessionwise Beh

```{r}
# Behavioral outcome ratings: cue x stimulus x session model (linear and
# quadratic contrasts for stimulus and session), with random slopes for cue,
# linear stimulus, and linear session per subject. Singular-fit warnings are
# suppressed (tol = 1e-4); proceed with caution.
model.behses <- lmer(outcomerating_impute ~
                          CUE_high_gt_low*STIM_linear*SES_linear +
                          CUE_high_gt_low*STIM_quadratic*SES_linear +
                          CUE_high_gt_low*STIM_linear*SES_quadratic +
                          CUE_high_gt_low*STIM_quadratic*SES_quadratic +
                          (CUE_high_gt_low + STIM_linear + SES_linear |
         sub), data = df_filter_ses,
         control=lmerControl(check.conv.singular = .makeCC(action = "ignore",  tol = 1e-4)))
         # control = lmerControl(optimizer = "nmkbw"
                               # ))
summary(model.behses)
sjPlot::tab_model(model.behses,
                  title = "Multilevel-modeling: \nlmer(BEH ~ CUE * STIM *SES + (CUE + STIM + SES | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
equatiomatic::extract_eq(model.behses)

# Check why convergence is negative ____________________________________________
# Refit with every available optimizer and rank by relative negative
# log-likelihood.
# NOTE(review): glance() on an allFit object needs broom.mixed attached —
# confirm it is loaded earlier in the file.
abeh <- lme4::allFit(model.behses)
opt.summary <- glance(abeh) |> select(optimizer, AIC, NLL_rel) |> arrange(NLL_rel)
print(opt.summary)
```

## 5 Sessionwise NPS

```{r}
# NPS session-wise model: cue x stimulus x session (linear + quadratic
# contrasts), with random slopes for cue, linear stimulus, and linear session.
# Fit via optimx with the Nelder-Mead-variant "nmkb" and very loose iteration
# caps / tight tolerances.
# NOTE(review): optimizer = "optimx" requires the optimx package — confirm it
# is installed/loaded.
model.NPSses <- lmer(NPS ~
                          CUE_high_gt_low*STIM_linear*SES_linear +
                          CUE_high_gt_low*STIM_quadratic*SES_linear +
                          CUE_high_gt_low*STIM_linear*SES_quadratic +
                          CUE_high_gt_low*STIM_quadratic*SES_quadratic +
                          (CUE_high_gt_low + STIM_linear +SES_linear |
         sub), data = df_filter_ses,
    control = lmerControl(
      optimizer = "optimx",
      optCtrl = list(
        method = "nmkb",
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e7,
        xtol_abs = 1e-9,
        ftol_abs = 1e-9
      )
    )
  )
summary(model.NPSses)
sjPlot::tab_model(model.NPSses,
                  title = "Multilevel-modeling: \nlmer(NPS ~ CUE * STIM * SES+ (CUE + STIM + SES| sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
equatiomatic::extract_eq(model.NPSses)
```

```{r}
# Try a sequence of optimx optimizers until one fits the maximal NPS session
# model (all random slopes) without convergence warnings.
optimx_options <- c("L-BFGS-B", "nlminb", "nlm", "bobyqa", "nmkb", "hjkb")
for (i in seq_along(optimx_options)) {  # FIX(review): seq_along over 1:length
  print(paste0("Testing optimx: ", optimx_options[i]))
  model_flex <- lmer(
    NPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      (CUE_high_gt_low + SES_linear + SES_quadratic + STIM_linear + STIM_quadratic |
         sub),
    data = df_filter_ses,
    control = lmerControl(
      optimizer = "optimx",
      optCtrl = list(
        method = optimx_options[i],
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e7,
        xtol_abs = 1e-9,
        ftol_abs = 1e-9
      )
    )
  )

  # lme4 records convergence problems here; NULL means a clean fit.
  if (is.null(model_flex@optinfo$conv$lme4$messages)) {
    print(paste0("One of the optimx options, ", optimx_options[i], ", worked!"))
    print(model_flex)
    break
  }
}
```

## 6 Sessionwise SIIPS

> Random slopes for the cue effect were collinear with the random intercepts (correlation of 1); therefore individual differences in baseline and individual differences in the cue effect could not be estimated simultaneously.

```{r}
with(df_filter_ses, table(CUE_high_gt_low, STIM_linear, SES_linear))
```

```{r}
library(glmmTMB)  # used in a later chunk; this chunk fits with lmer()

# SIIPS session-wise model. Random slopes limited to cue, quadratic stimulus,
# and linear session — fuller structures were singular (see note above).
# NOTE(review): the "nmkbw" optimizer wrapper requires the dfoptim package —
# confirm it is installed.
model.SIIPSses <- lmer(SIIPS ~
                          CUE_high_gt_low*STIM_linear*SES_linear +
                          CUE_high_gt_low*STIM_quadratic*SES_linear +
                          CUE_high_gt_low*STIM_linear*SES_quadratic +
                          CUE_high_gt_low*STIM_quadratic*SES_quadratic +
                          (1+ CUE_high_gt_low + STIM_quadratic + SES_linear|sub),
         data = df_filter_ses,
                  control = lmerControl(optimizer = "nmkbw"
                               ))


summary(model.SIIPSses)
# FIX(review): title now matches the fitted random-effects structure
# (previously claimed "(CUE + STIM *SES | sub)").
sjPlot::tab_model(model.SIIPSses,
                  title = "Multilevel-modeling: \nlmer(SIIPS ~ CUE * STIM * SES + (CUE + STIMquad + SESlin | sub), data = pain)",
                  CSS = list(css.table = '+font-size: 12;'))
equatiomatic::extract_eq(model.SIIPSses)
```

> Session is inversely collinear with stimulus intensity, which makes the random slopes impossible to estimate.

```{r}


library(ggplot2); theme_set(theme_bw())
library(ggalt)
# Scatter of SIIPS by stimulus intensity, colored by cue, with per-subject
# encircled clusters — a visual check of the stim/session collinearity.
# NOTE(review): the third, unnamed aes() argument (SES_linear) does not map to
# any aesthetic — likely a leftover; confirm whether it was meant to be a
# shape/size/facet mapping.
# NOTE(review): scale_y_log10() produces NaNs/warnings for non-positive SIIPS
# values — verify the DV range before interpreting this plot.
(ggplot(df_filter_ses, aes(STIM_linear,SIIPS,SES_linear,colour=CUE_high_gt_low))
    +geom_point()
    +geom_encircle(aes(group=sub))
    +theme(legend.position="none")
    + scale_y_log10()
)
```

### linear estimator

```{r}
# Try a sequence of optimx optimizers until one fits the reduced SIIPS session
# model without convergence warnings. Note the much looser tolerances (1e-3)
# and lower evaluation cap than the NPS loop above.
optimx_options <- c("L-BFGS-B", "nlminb", "nlm", "bobyqa", "nmkb", "hjkb")
for (i in seq_along(optimx_options)) {  # FIX(review): seq_along over 1:length
  print(paste0("Testing optimx: ", optimx_options[i]))
  model_flex <- lmer(
    SIIPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      (1+ CUE_high_gt_low + STIM_quadratic + SES_linear  |
         sub),
    data = df_filter_ses,
    control = lmerControl(
      optimizer = "optimx",
      optCtrl = list(
        method = optimx_options[i],
        maxit = 1e9,
        maxfun = 1e9,
        maxeval = 1e3,
        xtol_abs = 1e-3,
        ftol_abs = 1e-3
      )
    )
  )

  # lme4 records convergence problems here; NULL means a clean fit.
  if (is.null(model_flex@optinfo$conv$lme4$messages)) {
    print(paste0("One of the optimx options, ", optimx_options[i], ", worked!"))
    print(model_flex)
    break
  }
}
```

```{r}
# Non-linear (nloptwrap) optimizer algorithms, tried in order until one
# converges cleanly. REML = FALSE here, unlike the optimx loop above.
algoptions <- c("NLOPT_LN_PRAXIS", "NLOPT_GN_CRS2_LM",
                "NLOPT_LN_COBYLA", "NLOPT_LN_NEWUOA",
                "NLOPT_LN_NEWUOA_BOUND", "NLOPT_LN_NELDERMEAD",
                "NLOPT_LN_SBPLX", "NLOPT_LN_BOBYQA")

for (i in seq_along(algoptions)) {  # FIX(review): seq_along over 1:length
  print(paste0("Testing nloptwrap: ", algoptions[i]))
  model_flex <- lmer(SIIPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      ( 1 +CUE_high_gt_low + STIM_quadratic + SES_linear  |
         sub),
                     data = df_filter_ses, REML=FALSE,
                     control = lmerControl(optimizer = "nloptwrap",
                                           optCtrl = list(algorithm = algoptions[i],
                                                          maxit = 1e9,
                                                          maxfun = 1e9,
                                                          maxeval = 1e9,
                                                          xtol_abs = 1e-2,
                                                          ftol_abs = 1e-2)))
  # lme4 records convergence problems here; NULL means a clean fit.
  if (is.null(model_flex@optinfo$conv$lme4$messages)) {
    print(paste0("One of the nloptwrap options, ", algoptions[i], ", worked!"))
    print(model_flex)
    break
  }
}
```

```{r}
  # Refit the SIIPS session model with glmmTMB, which uses a different
  # estimation backend than lme4 and may converge where lmer() was singular.
  # Random slopes: cue, linear session, linear stimulus.
  model.glmm <- glmmTMB::glmmTMB(    SIIPS ~
      CUE_high_gt_low * STIM_linear * SES_linear +
      CUE_high_gt_low * STIM_quadratic * SES_linear +
      CUE_high_gt_low * STIM_linear * SES_quadratic +
      CUE_high_gt_low * STIM_quadratic * SES_quadratic +
      ( CUE_high_gt_low + SES_linear  + STIM_linear  |         sub),
                     data = df_filter_ses)
summary(model.glmm)
```

```{r}
# Session-wise SIIPS descriptive summaries (grouped by sub x stim x cue x ses).
# NOTE(review): this chunk computes summary tables and plot parameters but no
# plotting/saving call follows — plot_savefname, ggtitle, ylim, and color are
# currently unused; the plotting code appears to live elsewhere or was removed.
dv <- "SIIPS"
taskname <- "pain"
subject <- "sub"
# Plot 1: reordering variables for plots _______________________________________
df_filter$cue_name[df_filter$cue == "high_cue"] <-  "high cue"
df_filter$cue_name[df_filter$cue == "low_cue"] <-  "low cue"

df_filter$stim_name[df_filter$stimulusintensity == "high_stim"] <-  "High"
df_filter$stim_name[df_filter$stimulusintensity == "med_stim"] <-  "Med"
df_filter$stim_name[df_filter$stimulusintensity == "low_stim"] <-  "Low"
df_filter$stim_ordered <- factor(df_filter$stim_name, levels = c("Low", "Med", "High"))
df_filter$cue_ordered <- factor(df_filter$cue_name, levels = c("low cue", "high cue"))
model_iv1 <- "stim_ordered"
model_iv2 <- "cue_ordered"

# Plot 2: summary statistics  __________________________________________________
# Unlike the earlier SIIPS chunk, "ses" is added to the grouping so means/SEs
# are computed per session.
SIIPS_subjectwise <- meanSummary(df_filter,
                                      c(subject, model_iv1, model_iv2, "ses"), dv)
SIIPS_groupwise <- summarySEwithin(
  data = SIIPS_subjectwise,
  measurevar = "mean_per_sub",
  withinvars = c(model_iv1, model_iv2, "ses"),
  idvar = subject
)
SIIPS_groupwise$task <- taskname
# https://stackoverflow.com/questions/29402528/append-data-frames-together-in-a-for-loop/29419402

# Plot 3: plot parameters  _____________________________________________________
sub_mean <- "mean_per_sub"
group_mean <- "mean_per_sub_norm_mean"
se <- "se"
ggtitle <- paste(taskname, " - beh Cooksd removed")
title <- paste(taskname, " - beh")
xlab <- ""
ylab <- "SIIPS"
ylim <- c(-10, 60)
dv_keyword <- "siips"
if (any(startsWith(dv_keyword, c("expect", "Expect")))) {
  color <- c("#1B9E77", "#D95F02")
} else {
  color <- c(lowcuehex, highcuehex)
} # if keyword starts with
plot_savefname <- file.path(
  analysis_dir,
  paste(
    "raincloud_task-", taskname, "_rating-", dv_keyword, "_", as.character(Sys.Date()), ".png",
    sep = ""
  )
)
```
